Load libraries

library(tidyverse) # data manipulation
library(ggpubr) # producing data exploratory plots
library(modelsummary) # descriptive data 
library(glmmTMB) # running generalised mixed models 
library(DHARMa) # model diagnostics 
library(performance) # model diagnostics  
library(ggeffects) # partial effect plots 
library(car) # running Anova on model 
library(emmeans) # post-hoc analysis 

Import data

# Read 1-month offspring size data: convert the first 15 columns (ID/design
# factors) to factors, rename LENGTH to STANDARD_LENGTH, drop the free-text
# NOTES column, and keep the design columns plus the two size measurements.
# NOTE(review): `across(1:15, ...)` and `select(1:15, ...)` are positional —
# they silently break if the CSV column order changes. Selecting by name
# would be safer; TODO confirm the import file's column order.
m1 <- read_csv("import_data/1_month_size_data_2022_2023.csv") |> 
  mutate(across(1:15,factor)) |> 
  mutate(STANDARD_LENGTH =LENGTH, 
         .keep = "unused") |> 
  select(!(NOTES)) |> 
  select(1:15,"STANDARD_LENGTH","MASS")
           
# Read 2-month offspring size data (same cleaning as m1).
# NOTE(review): positional column references — see note on the m1 import.
m2 <- read_csv("import_data/2_month_size_data_2022_2023.csv") |> 
  mutate(across(1:15,factor)) |> 
  mutate(STANDARD_LENGTH =LENGTH, 
         .keep = "unused") |> 
  select(!(NOTES)) |> 
  select(1:15,"STANDARD_LENGTH","MASS")
# Read 2.5-month offspring size data (same cleaning as m1).
# NOTE(review): positional column references — see note on the m1 import.
m2.5 <- read_csv("import_data/2-5_month_size_data_2022_2023.csv") |> 
  mutate(across(1:15,factor)) |> 
  mutate(STANDARD_LENGTH =LENGTH, 
         .keep = "unused") |> 
  select(!(NOTES)) |> 
  select(1:15,"STANDARD_LENGTH","MASS")
# Read adult size data. FISH_ID is duplicated into MALE and FEMALE so this
# table can be joined onto the offspring data by either parent column.
# POPULATION is decoded from characters 2-4 of FISH_ID. Each adult's
# treatment TEMPERATURE is recovered from the 1-month offspring table via
# both parent columns, the two candidates coalesced, and adults with no
# matching offspring record (NA temperature) are dropped.
adult <- read_csv("import_data/adult_size_2022_2023.csv") |> 
  mutate(across(1:3,factor), 
         MALE = FISH_ID, 
         FEMALE = FISH_ID, 
         POPULATION = str_sub(FISH_ID, 2,4), 
         POPULATION = case_when(POPULATION == "ARL" ~ "Arlington Reef", 
                                POPULATION == "SUD" ~ "Sudbury Reef",
                                POPULATION == "VLA" ~ "Vlassof cay",
                                POPULATION == "PRE" ~ "Pretty patches", 
                                TRUE ~ POPULATION)) |> 
  # Each join adds a TEMPERATURE column; the clash produces the .x/.y
  # suffixes consumed by coalesce() below.
  left_join(select(m1, c("MALE","TEMPERATURE")), 
             by="MALE") |> 
  left_join(select(m1, c("FEMALE","TEMPERATURE")), 
             by="FEMALE") |>
  # distinct() collapses the row fan-out created by joining onto the
  # many-rows-per-parent offspring table.
  distinct() |> 
  mutate(TEMPERATURE = coalesce(TEMPERATURE.x, TEMPERATURE.y)) |> 
  drop_na(TEMPERATURE) |> 
  select(-c("TEMPERATURE.x","TEMPERATURE.y"))

Data manipulation

# Attach paternal then maternal adult sizes to the 1-month offspring data
# and compute mid-parent values.
# NOTE(review): this pipeline leans on dplyr's join suffixes. After the
# first join the offspring's MASS becomes MASS.x and the male's MASS.y
# (name clash); MASS.y and SL are consumed by `.keep = "unused"`. The
# second join's MASS then keeps its bare name (no clash remains), so
# MASS_FEMALE = MASS is the female's mass and the offspring's own mass
# stays as MASS.x for the rest of the document. Fragile — renaming before
# joining would be more robust.
m1_df <- m1 |> 
  left_join(select(adult, c("MALE", "SL", "MASS")), 
            by ="MALE") |> 
  mutate(SL_MALE =SL, 
         MASS_MALE =MASS.y, 
         .keep = "unused") |>
  left_join(select(adult, c("FEMALE", "SL", "MASS")), 
            by ="FEMALE") |> 
  mutate(SL_FEMALE =SL, 
         MASS_FEMALE =MASS, 
         .keep ="unused") |> 
  # Mid-parent values: simple average of the two parents.
  mutate(SL_MIDPOINT = (SL_MALE+SL_FEMALE)/2, 
         MASS_MIDPOINT = (MASS_MALE+MASS_FEMALE)/2)

# Attach parental adult sizes to the 2-month offspring data (same
# suffix-dependent pipeline as m1_df — see the note there).
m2_df <- m2 |> 
  left_join(select(adult, c("MALE", "SL", "MASS")), 
            by ="MALE") |> 
  mutate(SL_MALE =SL, 
         MASS_MALE =MASS.y, 
         .keep = "unused") |>
  left_join(select(adult, c("FEMALE", "SL", "MASS")), 
            by ="FEMALE") |> 
  mutate(SL_FEMALE =SL, 
         MASS_FEMALE =MASS, 
         .keep ="unused") |> 
  mutate(SL_MIDPOINT = (SL_MALE+SL_FEMALE)/2, 
         MASS_MIDPOINT = (MASS_MALE+MASS_FEMALE)/2)

# Attach parental adult sizes to the 2.5-month offspring data (same
# suffix-dependent pipeline as m1_df — see the note there).
# NOTE(review): only this dataset drops rows with missing paternal SL —
# m1_df and m2_df keep them. Confirm this asymmetry is intentional.
m2.5_df <- m2.5 |> 
  left_join(select(adult, c("MALE", "SL", "MASS")), 
            by ="MALE") |> 
  mutate(SL_MALE =SL, 
         MASS_MALE =MASS.y, 
         .keep = "unused") |>
  left_join(select(adult, c("FEMALE", "SL", "MASS")), 
            by ="FEMALE") |> 
  mutate(SL_FEMALE =SL, 
         MASS_FEMALE =MASS, 
         .keep ="unused") |> 
  mutate(SL_MIDPOINT = (SL_MALE+SL_FEMALE)/2, 
         MASS_MIDPOINT = (MASS_MALE+MASS_FEMALE)/2) |> 
  drop_na(SL_MALE)

Exploratory data analysis

STANDARD_LENGTH

# Offspring standard length against paternal, maternal, and mid-parent
# standard length, coloured by rearing temperature, with per-temperature
# linear fits. Panels are arranged side by side sharing one legend.
plot1 <- m1_df |>
  ggplot(aes(x = SL_MALE, y = STANDARD_LENGTH, color = TEMPERATURE)) +
  geom_point(alpha = 0.05) +
  geom_smooth(method = "lm") +
  theme_classic()

plot2 <- m1_df |>
  ggplot(aes(x = SL_FEMALE, y = STANDARD_LENGTH, color = TEMPERATURE)) +
  geom_point(alpha = 0.05) +
  geom_smooth(method = "lm") +
  theme_classic()

plot3 <- m1_df |>
  ggplot(aes(x = SL_MIDPOINT, y = STANDARD_LENGTH, color = TEMPERATURE)) +
  geom_point(alpha = 0.05) +
  geom_smooth(method = "lm") +
  theme_classic()

ggarrange(plot1, plot2, plot3,
          nrow = 1, ncol = 3,
          common.legend = TRUE)

MASS

# Offspring mass (MASS.x, the offspring's own mass after the parental
# joins) against paternal, maternal, and mid-parent mass, coloured by
# rearing temperature. The y-axis is clipped to 0-0.15 g for readability.
plot1 <- m1_df |>
  ggplot(aes(x = MASS_MALE, y = MASS.x, color = TEMPERATURE)) +
  geom_point(alpha = 0.05) +
  geom_smooth(method = "lm") +
  ylim(0, 0.15) +
  theme_classic()

plot2 <- m1_df |>
  ggplot(aes(x = MASS_FEMALE, y = MASS.x, color = TEMPERATURE)) +
  geom_point(alpha = 0.05) +
  geom_smooth(method = "lm") +
  ylim(0, 0.15) +
  theme_classic()

plot3 <- m1_df |>
  ggplot(aes(x = MASS_MIDPOINT, y = MASS.x, color = TEMPERATURE)) +
  geom_point(alpha = 0.05) +
  geom_smooth(method = "lm") +
  ylim(0, 0.15) +
  theme_classic()

ggarrange(plot1, plot2, plot3,
          nrow = 1, ncol = 3,
          common.legend = TRUE)

Descriptive statistics

counts

Adults

# Cross-tab: number of adult fish per population x temperature treatment.
datasummary(Factor(POPULATION) ~ Factor(TEMPERATURE), 
            data=adult, 
            fmt = "%.0f")
tinytable_a69ri0exrgnq1kbpgw2n
POPULATION 27 28.5 30
Arlington Reef 8 8 8
Pretty patches 4 6 6
Sudbury Reef 4 4 2
Vlassof cay 6 2 6

1-months

# Cross-tab: 1-month offspring counts per population x temperature.
datasummary(Factor(POPULATION) ~ Factor(TEMPERATURE), 
            data=m1_df, 
            fmt = "%.0f")
tinytable_t1hl8w19aqbevhc3wbbs
POPULATION 27 28.5 30
Arlington Reef 231 105 202
Pretty Patches 116 82 142
Sudbury Reef 117 55 47
Vlassof Cay 114 60 120

2-months

# Cross-tab: 2-month offspring counts per population x temperature.
datasummary(Factor(POPULATION) ~ Factor(TEMPERATURE), 
            data=m2_df, 
            fmt = "%.0f")
tinytable_icqtpgsft2n2dr8a4b54
POPULATION 27 28.5 30
Arlington Reef 198 108 152
Pretty Patches 113 83 134
Sudbury Reef 113 60 56
Vlassof Cay 77 58 113

2.5-months

# Cross-tab: 2.5-month offspring counts per population x temperature.
datasummary(Factor(POPULATION) ~ Factor(TEMPERATURE), 
            data=m2.5_df, 
            fmt = "%.0f")
tinytable_6e5viwmea009m5u8kpie
POPULATION 27 28.5 30
Arlington Reef 239 103 211
Pretty Patches 100 83 133
Sudbury Reef 111 53 50
Vlassof Cay 102 60 106

standard length

Adults

# Summary statistics of adult standard length (SL) by temperature.
datasummary(Factor(TEMPERATURE) ~ SL * (NUnique + mean + median + min + max + sd + Histogram), 
            data = drop_na(adult, SL),  
            fmt = "%.2f")
tinytable_249os357cyc00q76hi1u
TEMPERATURE NUnique mean median min max sd Histogram
27 21 97.14 100.60 84.76 104.42 7.00 ▃▂▁▁▂▁▆▇
28.5 20 91.72 93.83 72.47 100.00 7.57 ▁▁▂▃▅▇▃
30 22 92.52 93.22 80.34 101.22 6.44 ▂▅▂▃▂▃▇▂▇▅

1-months

# Summary statistics of 1-month offspring standard length by temperature.
datasummary(Factor(TEMPERATURE) ~ STANDARD_LENGTH * (NUnique + mean + median + min + max + sd + Histogram), 
            data = drop_na(m1_df, STANDARD_LENGTH),  
            fmt = "%.2f")
tinytable_al3g456zx98rzrwkxxiz
TEMPERATURE NUnique mean median min max sd Histogram
27 402 12.94 12.99 8.25 18.05 1.86 ▁▂▃▆▇▇▄▃▁
28.5 220 13.26 13.23 8.74 17.87 1.45 ▂▄▇▅▆▁
30 344 13.51 13.48 7.73 17.76 1.63 ▁▃▅▇▆▄▂

2-months

# Summary statistics of 2-month offspring standard length by temperature.
datasummary(Factor(TEMPERATURE) ~ STANDARD_LENGTH * (NUnique + mean + median + min + max + sd + Histogram), 
            data = drop_na(m2_df, STANDARD_LENGTH),  
            fmt = "%.2f")
tinytable_kd2ocsi10n4yzkk919zp
TEMPERATURE NUnique mean median min max sd Histogram
27 368 20.36 20.81 10.73 27.53 2.41 ▁▁▂▆▇▃
28.5 264 20.27 20.47 12.87 26.83 2.40 ▁▁▄▅▇▆▂▁
30 351 20.08 20.28 11.69 26.14 2.58 ▁▁▁▁▄▇▇▄▂

2.5-months

# Summary statistics of 2.5-month offspring standard length by temperature.
datasummary(Factor(TEMPERATURE) ~ STANDARD_LENGTH * (NUnique + mean + median + min + max + sd + Histogram), 
            data = drop_na(m2.5_df, STANDARD_LENGTH),  
            fmt = "%.2f")
tinytable_luiy09wm54t99izzy5cc
TEMPERATURE NUnique mean median min max sd Histogram
27 439 24.15 24.56 11.92 31.70 3.02 ▁▁▂▄▇▅▂
28.5 267 23.65 23.98 14.69 30.15 2.98 ▁▁▁▃▅▆▇▆▃▁
30 382 23.58 23.94 13.74 31.04 2.61 ▁▁▂▄▇▇▃▁

mass

Adults

# Summary statistics of adult mass by temperature.
datasummary(Factor(TEMPERATURE) ~ MASS * (NUnique + mean + median + min + max + sd + Histogram), 
            data = drop_na(adult, MASS),  
            fmt = "%.2f")
tinytable_fp977t774uyqv97tyf9d
TEMPERATURE NUnique mean median min max sd Histogram
27 21 46.93 50.63 29.85 59.93 10.30 ▇▁▃▄▄▆▄
28.5 20 38.81 42.36 16.28 53.26 10.26 ▁▁▄▃▄▇▆▁
30 22 39.94 39.62 23.91 57.31 9.43 ▅▇▂▇▇▇▂▇▅▂

1-months

# Summary statistics of 1-month offspring mass (MASS.x after the joins).
datasummary(Factor(TEMPERATURE) ~ MASS.x * (NUnique + mean + median + min + max + sd + Histogram), 
            data = drop_na(m1_df, MASS.x),  
            fmt = "%.2f")
tinytable_0tjzzs1ll8c53m5iv9ow
TEMPERATURE NUnique mean median min max sd Histogram
27 437 0.06 0.06 0.01 0.22 0.03 ▃▇▇▄▂▁
28.5 248 0.07 0.06 0.02 0.19 0.02 ▁▅▇▄▃
30 389 0.07 0.07 0.01 0.15 0.02 ▄▅▇▆▅▃▂▁

2-months

# Summary statistics of 2-month offspring mass (MASS.x after the joins).
datasummary(Factor(TEMPERATURE) ~ MASS.x * (NUnique + mean + median + min + max + sd + Histogram), 
            data = drop_na(m2_df, MASS.x),  
            fmt = "%.2f")
tinytable_yp9dhwctpjxez48vwer3
TEMPERATURE NUnique mean median min max sd Histogram
27 460 0.27 0.27 0.03 0.68 0.10 ▁▂▄▇▆▃▁
28.5 297 0.28 0.27 0.06 0.64 0.10 ▁▃▇▇▇▄▁▁▁
30 425 0.28 0.27 0.04 0.62 0.11 ▂▂▅▇▇▄▂▁▁

2.5-months

# Summary statistics of 2.5-month offspring mass (MASS.x after the joins).
datasummary(Factor(TEMPERATURE) ~ MASS.x * (NUnique + mean + median + min + max + sd + Histogram), 
            data = drop_na(m2.5_df, MASS.x),  
            fmt = "%.2f")
tinytable_wud0n0nmcudhxgy115wg
TEMPERATURE NUnique mean median min max sd Histogram
27 529 0.46 0.46 0.04 1.00 0.17 ▁▂▄▇▇▆▄▁▁
28.5 293 0.45 0.44 0.08 1.00 0.18 ▂▄▆▇▆▅▃▂▁
30 477 0.45 0.45 0.08 0.96 0.15 ▁▂▄▆▇▄▂▁

Fit models [random factors]

1-month

# Candidate random-effect structures for 1-month standard length, all with
# a Gaussian response, compared by information criteria against an
# intercept-only null model.
modelNULL <- glmmTMB(STANDARD_LENGTH ~ 1,
                     data = m1_df,
                     family = gaussian())

model1 <- glmmTMB(STANDARD_LENGTH ~ (1|CLUTCH_NUMBER),
                  data = m1_df,
                  family = gaussian())

model2 <- glmmTMB(STANDARD_LENGTH ~ (1|CLUTCH_NUMBER) + (1|LEVEL),
                  data = m1_df,
                  family = gaussian())

model3 <- glmmTMB(STANDARD_LENGTH ~ (1|CLUTCH_NUMBER) + (1|CLUTCH_ORDER),
                  data = m1_df,
                  family = gaussian())

model4 <- glmmTMB(STANDARD_LENGTH ~ (1|CLUTCH_NUMBER) + (1|REGION),
                  data = m1_df,
                  family = gaussian())

model5 <- glmmTMB(STANDARD_LENGTH ~ (1|CLUTCH_NUMBER) + (1|REGION) + (1|POPULATION),
                  data = m1_df,
                  family = gaussian())

model6 <- glmmTMB(STANDARD_LENGTH ~ (1|CLUTCH_NUMBER) + (1|CLUTCH_ORDER) + (1|REGION) + (1|POPULATION),
                  data = m1_df,
                  family = gaussian())

# Lower AIC/BIC indicates the better-supported random-effect structure.
AIC(modelNULL, model1, model2, model3, model4, model5, model6)
BIC(modelNULL, model1, model2, model3, model4, model5, model6)

2-month

# Candidate random-effect structures for 2-month standard length (same
# comparison as the 1-month section, fitted to m2_df).
modelNULL <- glmmTMB(STANDARD_LENGTH ~ 1,
                     data = m2_df,
                     family = gaussian())

model1 <- glmmTMB(STANDARD_LENGTH ~ (1|CLUTCH_NUMBER),
                  data = m2_df,
                  family = gaussian())

model2 <- glmmTMB(STANDARD_LENGTH ~ (1|CLUTCH_NUMBER) + (1|LEVEL),
                  data = m2_df,
                  family = gaussian())

model3 <- glmmTMB(STANDARD_LENGTH ~ (1|CLUTCH_NUMBER) + (1|CLUTCH_ORDER),
                  data = m2_df,
                  family = gaussian())

model4 <- glmmTMB(STANDARD_LENGTH ~ (1|CLUTCH_NUMBER) + (1|REGION),
                  data = m2_df,
                  family = gaussian())

model5 <- glmmTMB(STANDARD_LENGTH ~ (1|CLUTCH_NUMBER) + (1|REGION) + (1|POPULATION),
                  data = m2_df,
                  family = gaussian())

model6 <- glmmTMB(STANDARD_LENGTH ~ (1|CLUTCH_NUMBER) + (1|CLUTCH_ORDER) + (1|REGION) + (1|POPULATION),
                  data = m2_df,
                  family = gaussian())

# Lower AIC/BIC indicates the better-supported random-effect structure.
AIC(modelNULL, model1, model2, model3, model4, model5, model6)
BIC(modelNULL, model1, model2, model3, model4, model5, model6)

2.5-month

# Candidate random-effect structures for 2.5-month standard length (same
# comparison as the 1-month section, fitted to m2.5_df).
modelNULL <- glmmTMB(STANDARD_LENGTH ~ 1,
                     data = m2.5_df,
                     family = gaussian())

model1 <- glmmTMB(STANDARD_LENGTH ~ (1|CLUTCH_NUMBER),
                  data = m2.5_df,
                  family = gaussian())

model2 <- glmmTMB(STANDARD_LENGTH ~ (1|CLUTCH_NUMBER) + (1|LEVEL),
                  data = m2.5_df,
                  family = gaussian())

model3 <- glmmTMB(STANDARD_LENGTH ~ (1|CLUTCH_NUMBER) + (1|CLUTCH_ORDER),
                  data = m2.5_df,
                  family = gaussian())

model4 <- glmmTMB(STANDARD_LENGTH ~ (1|CLUTCH_NUMBER) + (1|REGION),
                  data = m2.5_df,
                  family = gaussian())

model5 <- glmmTMB(STANDARD_LENGTH ~ (1|CLUTCH_NUMBER) + (1|REGION) + (1|POPULATION),
                  data = m2.5_df,
                  family = gaussian())

model6 <- glmmTMB(STANDARD_LENGTH ~ (1|CLUTCH_NUMBER) + (1|CLUTCH_ORDER) + (1|REGION) + (1|POPULATION),
                  data = m2.5_df,
                  family = gaussian())

# Lower AIC/BIC indicates the better-supported random-effect structure.
AIC(modelNULL, model1, model2, model3, model4, model5, model6)
BIC(modelNULL, model1, model2, model3, model4, model5, model6)

For standard length measurements at different time periods (1, 2, and 2.5 months), the best model is the simplest model (i.e., model1), where the only random factor present is CLUTCH_NUMBER.

Fit model [fixed factors]

Now that we have figured out which random factors will be included within our generalized linear mixed effects model, we can start to explore different hypotheses by adding in our fixed factors - covariates.

Fixed factors that will be included will be those that are essential to answering the initial research question based on heritability of traits between offspring and parental fish - labelled as MALE and FEMALE in the dataframe, as well as their combined score MIDPOINT, if applicable. TEMPERATURE is also essential to answering the main research question, which looks to see if heritability changes at different temperatures.

Our main research hypothesis will be modelled using the formula below:

STANDARD_LENGTH ~ SL_(FE)MALE*TEMPERATURE + (1|CLUTCH_NUMBER)

An alternative research hypothesis we will test will include an interaction with PARENTAL_DAYS_IN_TREATMENT, to see if heritability was affected by how long adults spent at experimental temperatures. This model may look something like:

STANDARD_LENGTH ~ SL_(FE)MALE*TEMPERATURE:PARENTAL_DAYS_IN_TREATMENT + (1|CLUTCH_NUMBER)

Lets start fitting models:

offspring-male

1-month

Models

Main hypothesis
# Main model: offspring SL ~ paternal SL x temperature, clutch random
# intercept. family changed from the string 'gaussian' to gaussian() for
# consistency with every other model in this document (equivalent fits).
model1a <- glmmTMB(STANDARD_LENGTH ~ SL_MALE*TEMPERATURE + (1|CLUTCH_NUMBER), 
                    family=gaussian(), 
                    data=m1_df)
Alternative hypothesis
# BUGFIX: the import step factor-converts the first 15 columns, and
# as.numeric() on a factor returns level codes (string-sorted), not the
# number of days. as.character() first recovers the actual values; it is a
# no-op if the column was already character/numeric. TODO confirm
# PARENTAL_DAYS_IN_TREATMENT is among the factor-coded columns.
model1b <- glmmTMB(STANDARD_LENGTH ~ SL_MALE*TEMPERATURE*as.numeric(as.character(PARENTAL_DAYS_IN_TREATMENT)) + (1|CLUTCH_NUMBER), 
                    family=gaussian(), 
                    data=m1_df)

Model selection

# BUGFIX: `modelNULL` was last refitted on the 2.5-month data (random-factor
# section), so the previous comparison mixed datasets — that is exactly what
# the "models are not all fitted to the same number of observations" warning
# reported, and it made the criteria non-comparable. Refit the null on the
# rows actually used by model1a. The non-standard `k = 12` penalty is also
# dropped (AIC's default per-parameter penalty is k = 2).
# NOTE(review): model1b may still use fewer rows if
# PARENTAL_DAYS_IN_TREATMENT has missing values — check its df.resid.
modelNULL.m1 <- glmmTMB(STANDARD_LENGTH ~ 1,
                        family = gaussian(),
                        data = model.frame(model1a))
AIC(modelNULL.m1, model1a, model1b)
BIC(modelNULL.m1, model1a, model1b)

Model1a was selected as the best model and will be used going forward.

Model validation

DHARMa
# DHARMa simulation-based residuals for model1a: QQ/uniformity and
# residual-vs-fitted diagnostic plots.
model1a |> 
  simulateResiduals(plot=TRUE)  

## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help. 
##  
## Scaled residual values: 0.876 0.92 0.98 0.568 0.712 0.62 0.916 0.7 0.296 0.888 0.196 0.596 0.612 0.812 0.536 0.22 0.884 0.62 0.52 0.924 ...
model1a |> testResiduals(plot=T) 

## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.021579, p-value = 0.5377
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 1.013, p-value = 0.832
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 13, observations = 1388, p-value = 0.5431
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.004996154 0.015962883
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                            0.009365994
## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.021579, p-value = 0.5377
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 1.013, p-value = 0.832
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 13, observations = 1388, p-value = 0.5431
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.004996154 0.015962883
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                            0.009365994
performance
model1a |> 
  check_model(detrend=FALSE)
## `check_outliers()` does not yet support models of class `glmmTMB`.

Partial effect plots

# Model-based trend of offspring SL across paternal SL within each temperature.
model1a |> ggemmeans(~SL_MALE|TEMPERATURE) |> 
  plot(add.data =FALSE) 

# Marginal temperature effect; interpret cautiously given the fitted
# SL_MALE x TEMPERATURE interaction (see the NOTE emitted below).
model1a |> ggemmeans(~TEMPERATURE) |> 
  plot(add.data =FALSE) 
## NOTE: Results may be misleading due to involvement in interactions

Model investigation

Summary
model1a |> summary()
##  Family: gaussian  ( identity )
## Formula:          STANDARD_LENGTH ~ SL_MALE * TEMPERATURE + (1 | CLUTCH_NUMBER)
## Data: m1_df
## 
##      AIC      BIC   logLik deviance df.resid 
##   4522.3   4564.2  -2253.2   4506.3     1380 
## 
## Random effects:
## 
## Conditional model:
##  Groups        Name        Variance Std.Dev.
##  CLUTCH_NUMBER (Intercept) 1.204    1.097   
##  Residual                  1.339    1.157   
## Number of obs: 1388, groups:  CLUTCH_NUMBER, 50
## 
## Dispersion estimate for gaussian family (sigma^2): 1.34 
## 
## Conditional model:
##                          Estimate Std. Error z value Pr(>|z|)   
## (Intercept)              1.705421   3.835079   0.445  0.65654   
## SL_MALE                  0.115953   0.039683   2.922  0.00348 **
## TEMPERATURE28.5          1.173998   6.946791   0.169  0.86580   
## TEMPERATURE30            9.270902   5.175407   1.791  0.07324 . 
## SL_MALE:TEMPERATURE28.5 -0.003166   0.074194  -0.043  0.96596   
## SL_MALE:TEMPERATURE30   -0.088563   0.055022  -1.610  0.10749   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Anova
model1a |> Anova()
Confint
model1a |> confint()
##                                          2.5 %      97.5 %     Estimate
## (Intercept)                        -5.81119565  9.22203719  1.705420772
## SL_MALE                             0.03817567  0.19373130  0.115953484
## TEMPERATURE28.5                   -12.44146230 14.78945897  1.173998335
## TEMPERATURE30                      -0.87270857 19.41451247  9.270901952
## SL_MALE:TEMPERATURE28.5            -0.14858388  0.14225099 -0.003166446
## SL_MALE:TEMPERATURE30              -0.19640438  0.01927813 -0.088563126
## Std.Dev.(Intercept)|CLUTCH_NUMBER   0.89522033  1.34545606  1.097487865
r-squared
model1a |> r2_nakagawa()
## # R2 for Mixed Models
## 
##   Conditional R2: 0.537
##      Marginal R2: 0.120

Summary figure

# Grid of paternal SL values spanning the observed range, at which estimated
# marginal means are computed from the fitted model.
# BUGFIX: na.rm = TRUE added — m1_df is not filtered for missing paternal
# lengths, so min()/max() could return NA and seq(from = NA) would error.
m1.sl <- emmeans(model1a, ~ SL_MALE*TEMPERATURE, 
                 at =list(SL_MALE=seq(from =min(m1_df$SL_MALE, na.rm = TRUE), 
                                      to =max(m1_df$SL_MALE, na.rm = TRUE), by=.25)))

m1.sl.df <- as.data.frame(m1.sl)

# Observed data with population-level predictions and response-scale
# residuals attached.
# NOTE(review): predict()/residuals() return one value per row used in the
# model fit; this assumes drop_na(m1_df, SL_MALE, STANDARD_LENGTH) yields
# exactly those rows — confirm the row counts match.
m1.sl.obs <- drop_na(m1_df, SL_MALE, STANDARD_LENGTH) |> 
  mutate(Pred =predict(model1a, re.form=NA, type ='response'), 
         Resid =residuals(model1a, type ='response'), 
         Fit =Pred+Resid) 

# Clutch-level means of offspring SL with t-based 95% confidence intervals.
m1.sl.obs.summarize <-  m1.sl.obs |> 
  group_by(CLUTCH_NUMBER, TEMPERATURE) |> 
  summarise(mean.sl =mean(STANDARD_LENGTH, na.rm=TRUE), 
            mean.sl.male =mean(SL_MALE, na.rm = TRUE), 
            sd.sl =sd(STANDARD_LENGTH, na.rm =TRUE), 
            n.sl = n()) |> 
  mutate(se.sl = sd.sl / sqrt(n.sl), 
         lower.ci.sl =mean.sl - qt(1-(0.05/2), n.sl -1) * se.sl, 
         upper.ci.sl =mean.sl + qt(1-(0.05/2), n.sl -1) * se.sl) |> 
  ungroup()
## `summarise()` has grouped output by 'CLUTCH_NUMBER'. You can override using the
## `.groups` argument.
# Model-predicted relationship (lines) overlaid with clutch means +/- 95% CI
# (points), faceted by temperature.
ggplot(data = m1.sl.df, aes(x=SL_MALE, y=emmean)) + 
  stat_smooth(aes(color=TEMPERATURE), 
              method = "lm") + 
  geom_pointrange(data = m1.sl.obs.summarize, aes(x =mean.sl.male, 
                                                  y =mean.sl, 
                                                  ymin =lower.ci.sl, 
                                                  ymax =upper.ci.sl, 
                                                  color = TEMPERATURE)) +  
  scale_color_manual(values = c("#69d7d8","#ff9c56", "#903146")) + 
  scale_fill_manual(values =c("#69d7d8","#ff9c56", "#903146")) +
  facet_wrap(~TEMPERATURE)+
  xlab("PARENTAL MALE STANDARD LENGTH (mm)") + 
  ylab("OFFSPRING STANDARD LENGTH (mm)") + 
  ggtitle("Offspring-male relationship") +
  theme_classic()
## `geom_smooth()` using formula = 'y ~ x'

2-month

Models

Main hypothesis
# Main model: offspring SL ~ paternal SL x temperature, clutch random
# intercept (2-month data).
model1a <- glmmTMB(STANDARD_LENGTH ~ SL_MALE*TEMPERATURE + (1|CLUTCH_NUMBER), 
                    family=gaussian(), 
                    data=m2_df)
Alternative hypothesis
# BUGFIX: as.numeric() on a factor-coded column returns level codes, not
# days — as.character() first recovers the actual values (no-op if already
# character/numeric). family spec aligned with the rest of the file.
model1b <- glmmTMB(STANDARD_LENGTH ~ SL_MALE*TEMPERATURE*as.numeric(as.character(PARENTAL_DAYS_IN_TREATMENT)) + (1|CLUTCH_NUMBER), 
                    family=gaussian(), 
                    data=m2_df)

Model selection

# BUGFIX: `modelNULL` was last refitted on the 2.5-month data, so the
# previous comparison mixed datasets (hence the unequal-observations
# warning) — the "null model appears better" conclusion below should be
# re-checked against this corrected comparison. Refit the null on the rows
# used by model1a; the non-standard `k = 12` penalty is dropped (default 2).
modelNULL.m2 <- glmmTMB(STANDARD_LENGTH ~ 1,
                        family = gaussian(),
                        data = model.frame(model1a))
AIC(modelNULL.m2, model1a, model1b)
BIC(modelNULL.m2, model1a, model1b)

The null model appears better than the models that we used. Let’s explore the data a bit more and see if we can find a reason for this. Let’s start by looking at a basic histogram of our data.

hist(m2_df$STANDARD_LENGTH)

There appears to be a left skew within our data. Let’s see if this can be better modelled with a Gamma distribution. If not we can try to incorporate transformations to our response variable. The model validations below also could use some improving.

Model validation

DHARMa
# DHARMa simulation-based residual plots for the 2-month model1a.
model1a |> 
  simulateResiduals(plot=TRUE)  

## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help. 
##  
## Scaled residual values: 0 0.124 0.028 0.068 0.4 0.384 0.396 0.492 0.476 0.328 0.504 0.776 0.664 0.612 0.184 0.548 0.644 0.408 0.384 0.588 ...
model1a |> testResiduals(plot=T) 

## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.069304, p-value = 1.066e-05
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 0.99972, p-value = 1
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 24, observations = 1264, p-value =
## 0.0001642
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.01220249 0.02812061
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                             0.01898734
## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.069304, p-value = 1.066e-05
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 0.99972, p-value = 1
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 24, observations = 1264, p-value =
## 0.0001642
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.01220249 0.02812061
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                             0.01898734
performance
model1a |> 
  check_model(detrend=FALSE)
## `check_outliers()` does not yet support models of class `glmmTMB`.

Models (Gamma)

Model selection

# NOTE(review): model1a.gammalog and model1a.gammaid (presumably Gamma-family
# refits with log and identity links) are not defined anywhere in the visible
# document — their code chunk appears to be missing. Confirm they were fitted
# in a hidden chunk.
# NOTE(review): `k = 4` doubles AIC's usual per-parameter penalty (default
# k = 2), and `modelNULL` is still the 2.5-month null model (hence the
# unequal-observations warning) — confirm both are intentional.
AIC(modelNULL, model1a, model1a.gammalog,model1a.gammaid , k=4) 
## Warning in AIC.default(modelNULL, model1a, model1a.gammalog, model1a.gammaid, :
## models are not all fitted to the same number of observations
BIC(modelNULL, model1a, model1a.gammalog,model1a.gammaid )
## Warning in BIC.default(modelNULL, model1a, model1a.gammalog, model1a.gammaid):
## models are not all fitted to the same number of observations

Conclusion: Using a gamma distribution does not help the model. Next we can try transforming the data. A sqrt transformation will be used on the response variable to help with the left skewness.

Models - transformations

Main hypothesis - sqrt transformation
# Gaussian model of sqrt-transformed length (identity link), intended to
# reduce the skew seen in the raw data.
model1a.sqrt <- glmmTMB(sqrt(STANDARD_LENGTH) ~ SL_MALE*TEMPERATURE + (1|CLUTCH_NUMBER), 
                    family=gaussian(link="identity"), 
                    data=m2_df)

# Same sqrt response but with a log link — the NA/NaN warning below was
# emitted by this fit; presumably the log link produced invalid evaluations
# during optimisation. TODO confirm this variant is actually needed.
model1a.loglink <- glmmTMB(sqrt(STANDARD_LENGTH) ~ SL_MALE*TEMPERATURE + (1|CLUTCH_NUMBER), 
                    family=gaussian(link="log"), 
                    data=m2_df) 
## Warning in (function (start, objective, gradient = NULL, hessian = NULL, :
## NA/NaN function evaluation
# Gaussian model of log-transformed length (identity link).
model1a.log <- glmmTMB(log(STANDARD_LENGTH) ~ SL_MALE*TEMPERATURE + (1|CLUTCH_NUMBER), 
                    family=gaussian(link="identity"), 
                    data=m2_df)

Model selection

# NOTE(review): AIC/BIC are NOT comparable across models whose responses are
# on different scales (Y vs sqrt(Y) vs log(Y)) without a Jacobian adjustment
# of the likelihood — the apparent "great improvement" of the transformed
# models is largely an artifact of the change of response scale.
# NOTE(review): `modelNULL` is still the 2.5-month null model, and `k = 4`
# doubles the default AIC penalty — confirm both are intentional.
AIC(modelNULL, model1a, model1a.sqrt,model1a.loglink,model1a.log,   k=4) 
## Warning in AIC.default(modelNULL, model1a, model1a.sqrt, model1a.loglink, :
## models are not all fitted to the same number of observations
BIC(modelNULL, model1a, model1a.sqrt,model1a.loglink,model1a.log)
## Warning in BIC.default(modelNULL, model1a, model1a.sqrt, model1a.loglink, :
## models are not all fitted to the same number of observations

Conclusion: The log transformation greatly improves model performance, so the log-transformed model will be used moving forward. (Note: some of the figures below use the sqrt-transformed model — these should be updated to match.) Let’s move on to model validations.

Model validation

DHARMa
model1a.log |> 
  simulateResiduals(plot=TRUE)  

## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help. 
##  
## Scaled residual values: 0 0.132 0.028 0.068 0.42 0.432 0.416 0.508 0.496 0.34 0.528 0.756 0.668 0.62 0.196 0.56 0.648 0.42 0.396 0.616 ...
model1a.log |> testResiduals(plot=T) 

## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.098532, p-value = 4.387e-11
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 0.99991, p-value = 0.984
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 30, observations = 1264, p-value =
## 2.488e-07
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.01606931 0.03370971
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                             0.02373418
## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.098532, p-value = 4.387e-11
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 0.99991, p-value = 0.984
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 30, observations = 1264, p-value =
## 2.488e-07
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.01606931 0.03370971
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                             0.02373418
performance
model1a.log |> 
  check_model(detrend=FALSE)
## `check_outliers()` does not yet support models of class `glmmTMB`.

Partial effect plots

# Model-based trend of offspring SL across paternal SL within each
# temperature (sqrt-response model; ggeffects back-transforms predictions,
# per the message below).
# NOTE(review): validation above used model1a.log but plots use
# model1a.sqrt — confirm which transformed model was actually selected.
model1a.sqrt |> ggemmeans(~SL_MALE|TEMPERATURE) |> 
  plot(add.data =FALSE) 
## Model has sqrt-transformed response. Back-transforming predictions to
##   original response scale. Standard errors are still on the sqrt-scale.

# Marginal temperature effect; interpret cautiously given the fitted
# interaction (see the NOTE emitted below).
model1a.sqrt |> ggemmeans(~TEMPERATURE) |> 
  plot(add.data =FALSE) 
## NOTE: Results may be misleading due to involvement in interactions
## Model has sqrt-transformed response. Back-transforming predictions to
##   original response scale. Standard errors are still on the sqrt-scale.

Model investigation

Summary
model1a.sqrt |> summary()
##  Family: gaussian  ( identity )
## Formula:          
## sqrt(STANDARD_LENGTH) ~ SL_MALE * TEMPERATURE + (1 | CLUTCH_NUMBER)
## Data: m2_df
## 
##      AIC      BIC   logLik deviance df.resid 
##    336.3    377.5   -160.2    320.3     1256 
## 
## Random effects:
## 
## Conditional model:
##  Groups        Name        Variance Std.Dev.
##  CLUTCH_NUMBER (Intercept) 0.008566 0.09255 
##  Residual                  0.071513 0.26742 
## Number of obs: 1264, groups:  CLUTCH_NUMBER, 47
## 
## Dispersion estimate for gaussian family (sigma^2): 0.0715 
## 
## Conditional model:
##                           Estimate Std. Error z value Pr(>|z|)    
## (Intercept)              4.6342522  0.3940090  11.762   <2e-16 ***
## SL_MALE                 -0.0013400  0.0040633  -0.330    0.742    
## TEMPERATURE28.5         -0.1533775  0.6713293  -0.228    0.819    
## TEMPERATURE30           -0.0046237  0.5333702  -0.009    0.993    
## SL_MALE:TEMPERATURE28.5  0.0014952  0.0071495   0.209    0.834    
## SL_MALE:TEMPERATURE30   -0.0003783  0.0056418  -0.067    0.947    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Anova
model1a.sqrt |> Anova()
Confint
model1a.sqrt |> confint()
##                                          2.5 %      97.5 %      Estimate
## (Intercept)                        3.862008774 5.406495540  4.6342521569
## SL_MALE                           -0.009303963 0.006623978 -0.0013399925
## TEMPERATURE28.5                   -1.469158754 1.162403822 -0.1533774658
## TEMPERATURE30                     -1.050010054 1.040762723 -0.0046236656
## SL_MALE:TEMPERATURE28.5           -0.012517595 0.015507955  0.0014951799
## SL_MALE:TEMPERATURE30             -0.011436096 0.010679509 -0.0003782931
## Std.Dev.(Intercept)|CLUTCH_NUMBER  0.070908531 0.120808073  0.0925544328
r-squared
model1a.sqrt |> r2_nakagawa()
## # R2 for Mixed Models
## 
##   Conditional R2: 0.110
##      Marginal R2: 0.003

Summary figure

# Predicted offspring standard length across the observed range of paternal
# standard length (SL_MALE), by temperature, from the sqrt-response model.
m2.sl <- emmeans(model1a.sqrt, ~ SL_MALE*TEMPERATURE, 
                 at =list(SL_MALE=seq(from =min(m2_df$SL_MALE), to =max(m2_df$SL_MALE), by=.25)))

m2.sl.df <- as.data.frame(m2.sl)

# Observed data with population-level predictions (re.form=NA drops the
# clutch random effect) and response-scale residuals; Fit = Pred + Resid
# reconstructs partial residuals for plotting.
m2.sl.obs <- drop_na(m2_df, SL_MALE, STANDARD_LENGTH) |> 
  mutate(Pred =predict(model1a.sqrt, re.form=NA, type ='response'), 
         Resid =residuals(model1a.sqrt, type ='response'), 
         Fit =Pred+Resid) 

# Clutch-level means and 95% t-based CIs of offspring length per temperature.
# NOTE(review): this chunk summarises raw STANDARD_LENGTH, whereas the
# analogous chunks elsewhere in this document summarise the partial
# residuals (Fit) — confirm which scale is intended here.
m2.sl.obs.summarize <-  m2.sl.obs |> 
  group_by(CLUTCH_NUMBER, TEMPERATURE) |> 
  summarise(mean.sl =mean(STANDARD_LENGTH, na.rm=TRUE), 
            mean.sl.male =mean(SL_MALE, na.rm = TRUE), 
            sd.sl =sd(STANDARD_LENGTH, na.rm =TRUE), 
            n.sl = n()) |> 
  mutate(se.sl = sd.sl / sqrt(n.sl), 
         lower.ci.sl =mean.sl - qt(1-(0.05/2), n.sl -1) * se.sl, 
         upper.ci.sl =mean.sl + qt(1-(0.05/2), n.sl -1) * se.sl) |> 
  ungroup()
## `summarise()` has grouped output by 'CLUTCH_NUMBER'. You can override using the
## `.groups` argument.
# Clutch means ± 95% CI plotted over the model-predicted trend, faceted by
# temperature.
ggplot(data = m2.sl.df, aes(x=SL_MALE, y=emmean)) + 
  # NOTE(review): emmean is on the sqrt response scale of model1a.sqrt, and
  # `formula = y^2 ~ x` squares it inside the smoother while the points are
  # raw standard lengths — confirm the two layers are on the same scale.
  stat_smooth(aes(color=TEMPERATURE), 
              method = "lm", 
              formula = y^2 ~ x) + 
  geom_pointrange(data = m2.sl.obs.summarize, aes(x =mean.sl.male, 
                                                  y =mean.sl, 
                                                  ymin =lower.ci.sl, 
                                                  ymax =upper.ci.sl, 
                                                  color = TEMPERATURE)) +  
  scale_color_manual(values = c("#69d7d8","#ff9c56", "#903146")) + 
  scale_fill_manual(values =c("#69d7d8","#ff9c56", "#903146")) +
  facet_wrap(~TEMPERATURE)+
  xlab("PARENTAL MALE STANDARD LENGTH (mm)") + 
  ylab("OFFSPRING STANDARD LENGTH (mm)") + 
  ggtitle("Offspring-male relationship") +
  theme_classic()

2.5-month

Models

Main hypothesis
# Main hypothesis: paternal length x temperature interaction, with a random
# intercept per clutch.
model1a <- glmmTMB(STANDARD_LENGTH ~ SL_MALE*TEMPERATURE + (1|CLUTCH_NUMBER), 
                    family=gaussian(link ="identity"), 
                    data=m2.5_df)
Alternative hypothesis
# Alternative: additionally allow effects to vary with time the parents
# spent in the temperature treatment.
model1b <- glmmTMB(STANDARD_LENGTH ~ SL_MALE*TEMPERATURE*as.numeric(PARENTAL_DAYS_IN_TREATMENT) + (1|CLUTCH_NUMBER), 
                    family=gaussian(link ="identity"), 
                    data=m2.5_df)

Model selection

# Compare against the null model; k raises the per-parameter AIC penalty.
AIC(modelNULL, model1a, model1b, k=12) 
BIC(modelNULL, model1a, model1b)

Once again the NULL model seems to outperform our hypothesis-testing models. Let's follow the steps that we conducted for the 2-month data and apply a log transformation to the response to see if it improves the model.

Models

Main hypothesis
# Log- and sqrt-transformed responses, fitted to address the poor fit of the
# untransformed model noted above.
model1a.log <- glmmTMB(log(STANDARD_LENGTH) ~ SL_MALE*TEMPERATURE + (1|CLUTCH_NUMBER), 
                    family=gaussian(link="identity"), 
                    data=m2.5_df)

model1a.sqrt <- glmmTMB(sqrt(STANDARD_LENGTH) ~ SL_MALE*TEMPERATURE + (1|CLUTCH_NUMBER), 
                    family=gaussian(link ="identity"), 
                    data=m2.5_df)

Model selection

# NOTE(review): AIC/BIC of models with differently transformed responses are
# not directly comparable without a Jacobian adjustment — confirm before
# selecting on these values.
AIC(modelNULL, model1a, model1a.log,model1a.sqrt, k=5) 
BIC(modelNULL, model1a, model1a.log,model1a.sqrt)

Model validation

DHARMa
# DHARMa residual diagnostics for the log model: simulate scaled residuals,
# then test uniformity, dispersion and outliers.
model1a.log |> 
  simulateResiduals(plot=TRUE)  

## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help. 
##  
## Scaled residual values: 0.944 0.876 0.86 0.564 0.652 0.788 0.86 0.42 0.896 0.404 0.94 0.272 0.788 0.824 0.928 0.712 0.892 0.972 0.848 0.336 ...
model1a.log |> testResiduals(plot=T) 

## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.078695, p-value = 1.194e-07
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 0.98858, p-value = 0.792
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 32, observations = 1343, p-value =
## 9.318e-08
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.01635359 0.03347159
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                             0.02382725
## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.078695, p-value = 1.194e-07
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 0.98858, p-value = 0.792
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 32, observations = 1343, p-value =
## 9.318e-08
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.01635359 0.03347159
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                             0.02382725
performance
model1a.log |> 
  check_model(detrend=FALSE)
## `check_outliers()` does not yet support models of class `glmmTMB`.

Partial effect plots

model1a.log |> ggemmeans(~SL_MALE|TEMPERATURE) |> 
  plot(add.data =FALSE) 
## Model has log-transformed response. Back-transforming predictions to
##   original response scale. Standard errors are still on the log-scale.

model1a.log |> ggemmeans(~TEMPERATURE) |> 
  plot(add.data =FALSE) 
## NOTE: Results may be misleading due to involvement in interactions
## Model has log-transformed response. Back-transforming predictions to
##   original response scale. Standard errors are still on the log-scale.

Model investigation

Summary
model1a.log |> summary()
##  Family: gaussian  ( identity )
## Formula:          
## log(STANDARD_LENGTH) ~ SL_MALE * TEMPERATURE + (1 | CLUTCH_NUMBER)
## Data: m2.5_df
## 
##      AIC      BIC   logLik deviance df.resid 
##  -1775.2  -1733.6    895.6  -1791.2     1335 
## 
## Random effects:
## 
## Conditional model:
##  Groups        Name        Variance Std.Dev.
##  CLUTCH_NUMBER (Intercept) 0.002184 0.04673 
##  Residual                  0.014515 0.12048 
## Number of obs: 1343, groups:  CLUTCH_NUMBER, 52
## 
## Dispersion estimate for gaussian family (sigma^2): 0.0145 
## 
## Conditional model:
##                          Estimate Std. Error z value Pr(>|z|)    
## (Intercept)              3.001805   0.179768  16.698   <2e-16 ***
## SL_MALE                  0.001841   0.001861   0.989    0.322    
## TEMPERATURE28.5          0.091645   0.322762   0.284    0.776    
## TEMPERATURE30            0.187153   0.238310   0.785    0.432    
## SL_MALE:TEMPERATURE28.5 -0.001078   0.003446  -0.313    0.754    
## SL_MALE:TEMPERATURE30   -0.002231   0.002536  -0.880    0.379    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Anova
model1a.log |> Anova()
Confint
model1a.log |> confint()
##                                          2.5 %      97.5 %     Estimate
## (Intercept)                        2.649466952 3.354143381  3.001805167
## SL_MALE                           -0.001806051 0.005487995  0.001840972
## TEMPERATURE28.5                   -0.540956737 0.724247427  0.091645345
## TEMPERATURE30                     -0.279926559 0.654232769  0.187153105
## SL_MALE:TEMPERATURE28.5           -0.007832090 0.005676656 -0.001077717
## SL_MALE:TEMPERATURE30             -0.007201632 0.002739709 -0.002230961
## Std.Dev.(Intercept)|CLUTCH_NUMBER  0.036225309 0.060280991  0.046730050
r-squared
model1a.log |> r2_nakagawa()
## # R2 for Mixed Models
## 
##   Conditional R2: 0.141
##      Marginal R2: 0.011

Summary figure

# Predicted offspring standard length across the observed paternal length
# range, by temperature, from the 2.5-month log-response model.
m1.sl <- emmeans(model1a.log, ~ SL_MALE*TEMPERATURE, 
                 at =list(SL_MALE=seq(from =min(m2.5_df$SL_MALE), to =max(m2.5_df$SL_MALE), by=.25)))

m1.sl.df <- as.data.frame(m1.sl)

# Observed data with population-level predictions (re.form=NA excludes the
# clutch random effect) and partial residuals (Fit = Pred + Resid).
m1.sl.obs <- drop_na(m2.5_df, SL_MALE, STANDARD_LENGTH) |> 
  mutate(Pred =predict(model1a.log, re.form=NA, type ='response'), 
         Resid =residuals(model1a.log, type ='response'), 
         Fit =Pred+Resid) 

# Clutch-level means and 95% t-based CIs of the partial residuals, plus mean
# paternal length per clutch for the x-axis.
m1.sl.obs.summarize <-  m1.sl.obs |> 
  group_by(CLUTCH_NUMBER, TEMPERATURE) |> 
  summarise(mean.sl =mean(Fit, na.rm=TRUE), 
            mean.sl.male =mean(SL_MALE, na.rm = TRUE), 
            sd.sl =sd(Fit, na.rm =TRUE), 
            n.sl = n()) |> 
  mutate(se.sl = sd.sl / sqrt(n.sl), 
         lower.ci.sl =mean.sl - qt(1-(0.05/2), n.sl -1) * se.sl, 
         upper.ci.sl =mean.sl + qt(1-(0.05/2), n.sl -1) * se.sl) |> 
  ungroup()
## `summarise()` has grouped output by 'CLUTCH_NUMBER'. You can override using the
## `.groups` argument.
# Clutch means ± 95% CI of partial residuals over the model-predicted
# paternal-length trend, faceted by temperature.
# NOTE(review): emmean here is on the log response scale of model1a.log while
# the points are on the response scale — confirm layers share a scale.
ggplot(data = m1.sl.df, aes(x=SL_MALE, y=emmean)) + 
  stat_smooth(aes(color=TEMPERATURE), 
              method = "lm", 
              formula = y ~ x) + 
  geom_pointrange(data = m1.sl.obs.summarize, aes(x =mean.sl.male, 
                                                  y =mean.sl, 
                                                  ymin =lower.ci.sl, 
                                                  ymax =upper.ci.sl, 
                                                  color = TEMPERATURE)) +  
  scale_color_manual(values = c("#69d7d8","#ff9c56", "#903146")) + 
  scale_fill_manual(values =c("#69d7d8","#ff9c56", "#903146")) +
  facet_wrap(~TEMPERATURE)+
  xlab("PARENTAL MALE STANDARD LENGTH (mm)") + 
  ylab("OFFSPRING STANDARD LENGTH (mm)") + 
  ggtitle("Offspring-male relationship") +
  theme_classic()

Offspring-female

1-month

Models

Main hypothesis
# Main hypothesis: maternal length x temperature interaction, with a random
# intercept per clutch.
model1a <- glmmTMB(STANDARD_LENGTH ~ SL_FEMALE*TEMPERATURE + (1|CLUTCH_NUMBER), 
                    family='gaussian', 
                    data=m1_df)
Alternative hypothesis
# Alternative: additionally allow effects to vary with time the parents
# spent in the temperature treatment.
model1b <- glmmTMB(STANDARD_LENGTH ~ SL_FEMALE*TEMPERATURE*as.numeric(PARENTAL_DAYS_IN_TREATMENT) + (1|CLUTCH_NUMBER), 
                    family='gaussian', 
                    data=m1_df)

Model selection

# NOTE(review): the warnings below mean the candidate models were fitted to
# different numbers of rows (NA handling differs between formulas), so these
# AIC/BIC comparisons are not strictly valid — consider refitting all models
# on a common complete-case dataset.
AIC(modelNULL, model1a, model1b, k=12) 
## Warning in AIC.default(modelNULL, model1a, model1b, k = 12): models are not all
## fitted to the same number of observations
BIC(modelNULL, model1a, model1b)
## Warning in BIC.default(modelNULL, model1a, model1b): models are not all fitted
## to the same number of observations

Model1a was selected as the best model and will be used going forward.

Model validation

DHARMa
model1a |> 
  simulateResiduals(plot=TRUE)  
## qu = 0.25, log(sigma) = -2.465176 : outer Newton did not converge fully.
## qu = 0.25, log(sigma) = -2.373479 : outer Newton did not converge fully.

## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help. 
##  
## Scaled residual values: 0.884 0.944 0.996 0.688 0.8 0.732 0.964 0.74 0.364 0.952 0.304 0.724 0.78 0.86 0.6 0.348 0.912 0.768 0.544 0.936 ...
model1a |> testResiduals(plot=T) 

## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.018029, p-value = 0.7799
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 1.0184, p-value = 0.808
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 11, observations = 1331, p-value = 0.8766
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.004132601 0.014739191
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                            0.008264463
## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.018029, p-value = 0.7799
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 1.0184, p-value = 0.808
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 11, observations = 1331, p-value = 0.8766
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.004132601 0.014739191
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                            0.008264463
performance
model1a |> 
  check_model(detrend=FALSE)
## `check_outliers()` does not yet support models of class `glmmTMB`.

Partial effect plots

model1a |> ggemmeans(~SL_FEMALE|TEMPERATURE) |> 
  plot(add.data =FALSE) 

model1a |> ggemmeans(~TEMPERATURE) |> 
  plot(add.data =FALSE) 
## NOTE: Results may be misleading due to involvement in interactions

Model investigation

Summary
model1a |> summary()
##  Family: gaussian  ( identity )
## Formula:          
## STANDARD_LENGTH ~ SL_FEMALE * TEMPERATURE + (1 | CLUTCH_NUMBER)
## Data: m1_df
## 
##      AIC      BIC   logLik deviance df.resid 
##   4340.7   4382.2  -2162.3   4324.7     1323 
## 
## Random effects:
## 
## Conditional model:
##  Groups        Name        Variance Std.Dev.
##  CLUTCH_NUMBER (Intercept) 1.099    1.048   
##  Residual                  1.346    1.160   
## Number of obs: 1331, groups:  CLUTCH_NUMBER, 48
## 
## Dispersion estimate for gaussian family (sigma^2): 1.35 
## 
## Conditional model:
##                           Estimate Std. Error z value Pr(>|z|)  
## (Intercept)                5.53924    2.90633   1.906   0.0567 .
## SL_FEMALE                  0.07590    0.03017   2.516   0.0119 *
## TEMPERATURE28.5            2.68132    4.55262   0.589   0.5559  
## TEMPERATURE30              5.32954    4.88350   1.091   0.2751  
## SL_FEMALE:TEMPERATURE28.5 -0.02056    0.04863  -0.423   0.6725  
## SL_FEMALE:TEMPERATURE30   -0.04828    0.05141  -0.939   0.3476  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Anova
model1a |> Anova()
Confint
model1a |> confint()
##                                         2.5 %     97.5 %    Estimate
## (Intercept)                       -0.15707023 11.2355419  5.53923585
## SL_FEMALE                          0.01677235  0.1350219  0.07589712
## TEMPERATURE28.5                   -6.24165530 11.6042999  2.68132230
## TEMPERATURE30                     -4.24194847 14.9010207  5.32953612
## SL_FEMALE:TEMPERATURE28.5         -0.11587486  0.0747581 -0.02055838
## SL_FEMALE:TEMPERATURE30           -0.14904158  0.0524739 -0.04828384
## Std.Dev.(Intercept)|CLUTCH_NUMBER  0.85086513  1.2919121  1.04844787
r-squared
model1a |> r2_nakagawa()
## # R2 for Mixed Models
## 
##   Conditional R2: 0.505
##      Marginal R2: 0.101

Summary figure

# Restrict to rows with a known maternal length before computing the
# prediction grid, so the emmeans range matches the data actually modelled.
m1_df <-  
  m1_df |> 
  drop_na(SL_FEMALE)

# Predicted offspring standard length across the observed range of maternal
# standard length (SL_FEMALE), by temperature, from model1a.
# (Fix: a stray copy of the male-analysis emmeans call was removed here; it
# re-assigned m2.sl using model1a.sqrt, which at this point in the script is
# the 2.5-month male model and does not belong in the female analysis.)
m1.sl <- emmeans(model1a, ~ SL_FEMALE*TEMPERATURE, 
                 at =list(SL_FEMALE=seq(from =min(m1_df$SL_FEMALE), to =max(m1_df$SL_FEMALE), by=.25)))

m1.sl.df <- as.data.frame(m1.sl)

# Observed data with population-level predictions (re.form=NA excludes the
# clutch random effect) and partial residuals (Fit = Pred + Resid).
m1.sl.obs <- drop_na(m1_df, SL_FEMALE, STANDARD_LENGTH) |> 
  mutate(Pred =predict(model1a, re.form=NA, type ='response'), 
         Resid =residuals(model1a, type ='response'), 
         Fit =Pred+Resid) 

# Clutch-level means and 95% t-based CIs of the partial residuals (Fit),
# plus the mean maternal length per clutch for the x-axis.
m1.sl.obs.summarize <-  m1.sl.obs |> 
  group_by(CLUTCH_NUMBER, TEMPERATURE) |> 
  summarise(mean.sl =mean(Fit, na.rm=TRUE), 
            mean.sl.female =mean(SL_FEMALE, na.rm = TRUE), 
            sd.sl =sd(Fit, na.rm =TRUE), 
            n.sl = n()) |> 
  mutate(se.sl = sd.sl / sqrt(n.sl), 
         lower.ci.sl =mean.sl - qt(1-(0.05/2), n.sl -1) * se.sl, 
         upper.ci.sl =mean.sl + qt(1-(0.05/2), n.sl -1) * se.sl) |> 
  ungroup()
## `summarise()` has grouped output by 'CLUTCH_NUMBER'. You can override using the
## `.groups` argument.
# Clutch means ± 95% CI over the model-predicted maternal-length trend,
# faceted by temperature.
# Fix: this is the offspring-FEMALE section, but the title said
# "Offspring-male relationship" (copy-paste from the male figure).
ggplot(data = m1.sl.df, aes(x=SL_FEMALE, y=emmean)) + 
  stat_smooth(aes(color=TEMPERATURE), 
              method = "lm") + 
  geom_pointrange(data = m1.sl.obs.summarize, aes(x =mean.sl.female, 
                                                  y =mean.sl, 
                                                  ymin =lower.ci.sl, 
                                                  ymax =upper.ci.sl, 
                                                  color = TEMPERATURE)) +  
  scale_color_manual(values = c("#69d7d8","#ff9c56", "#903146")) + 
  scale_fill_manual(values =c("#69d7d8","#ff9c56", "#903146")) +
  facet_wrap(~TEMPERATURE)+
  xlab("PARENTAL FEMALE STANDARD LENGTH (mm)") + 
  ylab("OFFSPRING STANDARD LENGTH (mm)") + 
  ggtitle("Offspring-female relationship") +
  theme_classic()
## `geom_smooth()` using formula = 'y ~ x'

2-month

Models

Main hypothesis
# Main hypothesis: maternal length x temperature interaction, with a random
# intercept per clutch.
model1a <- glmmTMB(STANDARD_LENGTH ~ SL_FEMALE*TEMPERATURE + (1|CLUTCH_NUMBER), 
                    family=gaussian(), 
                    data=m2_df)
Alternative hypothesis
# Alternative: additionally allow effects to vary with time the parents
# spent in the temperature treatment.
model1b <- glmmTMB(STANDARD_LENGTH ~ SL_FEMALE*TEMPERATURE*as.numeric(PARENTAL_DAYS_IN_TREATMENT) + (1|CLUTCH_NUMBER), 
                    family='gaussian', 
                    data=m2_df)

Model selection

# NOTE(review): the warnings below mean the candidate models were fitted to
# different numbers of rows, so these AIC/BIC comparisons are not strictly
# valid — consider refitting on a common complete-case dataset.
AIC(modelNULL, model1a, model1b, k=12) 
## Warning in AIC.default(modelNULL, model1a, model1b, k = 12): models are not all
## fitted to the same number of observations
BIC(modelNULL, model1a, model1b)
## Warning in BIC.default(modelNULL, model1a, model1b): models are not all fitted
## to the same number of observations

Model validation

DHARMa
model1a |> 
  simulateResiduals(plot=TRUE)  

## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help. 
##  
## Scaled residual values: 0.004 0.092 0.024 0.096 0.404 0.328 0.392 0.512 0.412 0.32 0.52 0.78 0.6 0.572 0.172 0.464 0.604 0.352 0.324 0.512 ...
model1a |> testResiduals(plot=T) 

## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.076927, p-value = 1.165e-06
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 1.0001, p-value = 0.984
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 17, observations = 1213, p-value = 0.03297
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.008184788 0.022344553
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                             0.01401484
## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.076927, p-value = 1.165e-06
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 1.0001, p-value = 0.984
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 17, observations = 1213, p-value = 0.03297
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.008184788 0.022344553
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                             0.01401484
performance
model1a |> 
  check_model(detrend=FALSE)
## `check_outliers()` does not yet support models of class `glmmTMB`.

# Quick visual check of whether a cube transformation normalises the
# response distribution.
hist(m2_df$STANDARD_LENGTH^3) 

#ggplot(m2_df, aes(x=STANDARD_LENGTH, color=TEMPERATURE, fill=TEMPERATURE)) + 
  #geom_histogram(alpha=0.5) + 
 # theme_classic()

Models - transformations

Main hypothesis - sqrt transformation
# Candidate response transformations for the 2-month female data: sqrt,
# cube, and log, each with the same fixed and random structure as model1a.
model1a.sqrt <- glmmTMB(sqrt(STANDARD_LENGTH) ~ SL_FEMALE*TEMPERATURE + (1|CLUTCH_NUMBER), 
                    family=gaussian(link="identity"), 
                    data=m2_df)

model1a.pwr3 <- glmmTMB(STANDARD_LENGTH^3 ~ SL_FEMALE*TEMPERATURE + (1|CLUTCH_NUMBER), 
                    family=gaussian(link="identity"), 
                    data=m2_df) 

model1a.log <- glmmTMB(log(STANDARD_LENGTH) ~ SL_FEMALE*TEMPERATURE + (1|CLUTCH_NUMBER), 
                    family=gaussian(link="identity"), 
                    data=m2_df)

Model selection

# NOTE(review): AIC/BIC of models with differently transformed responses are
# not directly comparable without a Jacobian adjustment, and the warnings
# below indicate differing row counts across models.
AIC(modelNULL, model1a, model1a.sqrt,model1a.pwr3,model1a.log,   k=4) 
## Warning in AIC.default(modelNULL, model1a, model1a.sqrt, model1a.pwr3,
## model1a.log, : models are not all fitted to the same number of observations
BIC(modelNULL, model1a, model1a.sqrt,model1a.pwr3,model1a.log)
## Warning in BIC.default(modelNULL, model1a, model1a.sqrt, model1a.pwr3,
## model1a.log): models are not all fitted to the same number of observations

Conclusion: the transformed models greatly improve performance over the untransformed model; the sqrt-transformed model will be used moving forward. Let's move on to model validation.

Model validation

DHARMa
model1a.log |> 
  simulateResiduals(plot=TRUE)  

## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help. 
##  
## Scaled residual values: 0 0.116 0.024 0.104 0.448 0.364 0.416 0.536 0.444 0.34 0.548 0.776 0.62 0.608 0.188 0.48 0.612 0.388 0.368 0.56 ...
model1a.log |> testResiduals(plot=T) 

## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.10529, p-value = 4.171e-12
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 1.0004, p-value = 0.976
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 25, observations = 1213, p-value =
## 2.499e-05
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.01338107 0.03027497
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                             0.02061006
## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.10529, p-value = 4.171e-12
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 1.0004, p-value = 0.976
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 25, observations = 1213, p-value =
## 2.499e-05
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.01338107 0.03027497
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                             0.02061006
performance
model1a.log |> 
  check_model(detrend=FALSE)
## `check_outliers()` does not yet support models of class `glmmTMB`.

Partial effect plots

model1a.sqrt |> ggemmeans(~SL_FEMALE|TEMPERATURE) |> 
  plot(add.data =FALSE) 
## Model has sqrt-transformed response. Back-transforming predictions to
##   original response scale. Standard errors are still on the sqrt-scale.

model1a.sqrt |> ggemmeans(~TEMPERATURE) |> 
  plot(add.data =FALSE) 
## NOTE: Results may be misleading due to involvement in interactions
## Model has sqrt-transformed response. Back-transforming predictions to
##   original response scale. Standard errors are still on the sqrt-scale.

Model investigation

Summary
model1a.sqrt |> summary()
##  Family: gaussian  ( identity )
## Formula:          
## sqrt(STANDARD_LENGTH) ~ SL_FEMALE * TEMPERATURE + (1 | CLUTCH_NUMBER)
## Data: m2_df
## 
##      AIC      BIC   logLik deviance df.resid 
##    305.3    346.1   -144.6    289.3     1205 
## 
## Random effects:
## 
## Conditional model:
##  Groups        Name        Variance Std.Dev.
##  CLUTCH_NUMBER (Intercept) 0.007992 0.0894  
##  Residual                  0.070568 0.2656  
## Number of obs: 1213, groups:  CLUTCH_NUMBER, 45
## 
## Dispersion estimate for gaussian family (sigma^2): 0.0706 
## 
## Conditional model:
##                             Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                4.5186835  0.2995276  15.086   <2e-16 ***
## SL_FEMALE                 -0.0002317  0.0030872  -0.075    0.940    
## TEMPERATURE28.5           -0.2971534  0.4491625  -0.662    0.508    
## TEMPERATURE30              0.5367662  0.5186774   1.035    0.301    
## SL_FEMALE:TEMPERATURE28.5  0.0032236  0.0047758   0.675    0.500    
## SL_FEMALE:TEMPERATURE30   -0.0059220  0.0054242  -1.092    0.275    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Anova
model1a.sqrt |> Anova()
Confint
model1a.sqrt |> confint()
##                                          2.5 %      97.5 %      Estimate
## (Intercept)                        3.931620211 5.105746782  4.5186834966
## SL_FEMALE                         -0.006282586 0.005819150 -0.0002317182
## TEMPERATURE28.5                   -1.177495697 0.583188921 -0.2971533882
## TEMPERATURE30                     -0.479822825 1.553355148  0.5367661612
## SL_FEMALE:TEMPERATURE28.5         -0.006136814 0.012583936  0.0032235609
## SL_FEMALE:TEMPERATURE30           -0.016553152 0.004709128 -0.0059220122
## Std.Dev.(Intercept)|CLUTCH_NUMBER  0.067828174 0.117834224  0.0894006727
r-squared
model1a.sqrt |> r2_nakagawa()
## # R2 for Mixed Models
## 
##   Conditional R2: 0.110
##      Marginal R2: 0.009

Summary figure

# Restrict to rows with a known maternal length, then build the prediction
# grid over the observed SL_FEMALE range from the sqrt-response model.
m2_df <-  
  m2_df |> 
  drop_na(SL_FEMALE)
m2.sl <- emmeans(model1a.sqrt, ~ SL_FEMALE*TEMPERATURE, 
                 at =list(SL_FEMALE=seq(from =min(m2_df$SL_FEMALE), to =max(m2_df$SL_FEMALE), by=.25)))

m2.sl.df <- as.data.frame(m2.sl)

# Observed data with population-level predictions (re.form=NA excludes the
# clutch random effect) and partial residuals (Fit = Pred + Resid).
m2.sl.obs <- drop_na(m2_df, SL_FEMALE, STANDARD_LENGTH) |> 
  mutate(Pred =predict(model1a.sqrt, re.form=NA, type ='response'), 
         Resid =residuals(model1a.sqrt, type ='response'), 
         Fit =Pred+Resid) 

# Clutch-level means and 95% t-based CIs of the partial residuals.
# NOTE(review): `mean.sl.male` actually holds the mean SL_FEMALE here
# (naming carried over from the male section); the plot that follows relies
# on this column name, so it is left unchanged.
m2.sl.obs.summarize <-  m2.sl.obs |> 
  group_by(CLUTCH_NUMBER, TEMPERATURE) |> 
  summarise(mean.sl =mean(Fit, na.rm=TRUE), 
            mean.sl.male =mean(SL_FEMALE, na.rm = TRUE), 
            sd.sl =sd(Fit, na.rm =TRUE), 
            n.sl = n()) |> 
  mutate(se.sl = sd.sl / sqrt(n.sl), 
         lower.ci.sl =mean.sl - qt(1-(0.05/2), n.sl -1) * se.sl, 
         upper.ci.sl =mean.sl + qt(1-(0.05/2), n.sl -1) * se.sl) |> 
  ungroup()
## `summarise()` has grouped output by 'CLUTCH_NUMBER'. You can override using the
## `.groups` argument.
# Clutch means ± 95% CI over the model-predicted maternal-length trend,
# faceted by temperature.
# Fix: this is the offspring-FEMALE section, but the title said
# "Offspring-male relationship" (copy-paste from the male figure).
# (The x column is still named mean.sl.male upstream although it holds
# maternal lengths; kept as-is so this edit stands alone.)
ggplot(data = m2.sl.df, aes(x=SL_FEMALE, y=emmean)) + 
  stat_smooth(aes(color=TEMPERATURE), 
              method = "lm", 
              formula = y ~ x) + 
  geom_pointrange(data = m2.sl.obs.summarize, aes(x =mean.sl.male, 
                                                  y =mean.sl, 
                                                  ymin =lower.ci.sl, 
                                                  ymax =upper.ci.sl, 
                                                  color = TEMPERATURE)) +  
  scale_color_manual(values = c("#69d7d8","#ff9c56", "#903146")) + 
  scale_fill_manual(values =c("#69d7d8","#ff9c56", "#903146")) +
  facet_wrap(~TEMPERATURE)+
  xlab("PARENTAL FEMALE STANDARD LENGTH (mm)") + 
  ylab("OFFSPRING STANDARD LENGTH (mm)") + 
  ggtitle("Offspring-female relationship") +
  theme_classic()

2.5-month

Models

Main hypothesis
# Main hypothesis: maternal length x temperature interaction, with a random
# intercept per clutch.
model1a <- glmmTMB(STANDARD_LENGTH ~ SL_FEMALE*TEMPERATURE + (1|CLUTCH_NUMBER), 
                    family=gaussian(link ="identity"), 
                    data=m2.5_df)
Alternative hypothesis
# Alternative: additionally allow effects to vary with time the parents
# spent in the temperature treatment.
model1b <- glmmTMB(STANDARD_LENGTH ~ SL_FEMALE*TEMPERATURE*as.numeric(PARENTAL_DAYS_IN_TREATMENT) + (1|CLUTCH_NUMBER), 
                    family=gaussian(link ="identity"), 
                    data=m2.5_df)

Model selection

# NOTE(review): the warnings below mean the candidate models were fitted to
# different numbers of rows, so these AIC/BIC comparisons are not strictly
# valid — consider refitting on a common complete-case dataset.
AIC(modelNULL, model1a, model1b, k=12) 
## Warning in AIC.default(modelNULL, model1a, model1b, k = 12): models are not all
## fitted to the same number of observations
BIC(modelNULL, model1a, model1b)
## Warning in BIC.default(modelNULL, model1a, model1b): models are not all fitted
## to the same number of observations

Once again the NULL model seems to outperform our hypothesis-testing models. Let's follow the steps that we conducted for the 2-month data and apply a log transformation to the response to see if it improves the model.

Models

Main hypothesis
# Log- and sqrt-transformed responses for the 2.5-month female data, same
# structure as model1a.
model1a.log <- glmmTMB(log(STANDARD_LENGTH) ~ SL_FEMALE*TEMPERATURE + (1|CLUTCH_NUMBER), 
                    family=gaussian(link="identity"), 
                    data=m2.5_df)

model1a.sqrt <- glmmTMB(sqrt(STANDARD_LENGTH) ~ SL_FEMALE*TEMPERATURE + (1|CLUTCH_NUMBER), 
                    family=gaussian(link ="identity"), 
                    data=m2.5_df)

Model selection

# NOTE(review): AIC/BIC of models with differently transformed responses are
# not directly comparable without a Jacobian adjustment, and the warnings
# below indicate differing row counts across models.
AIC(modelNULL, model1a, model1a.log,model1a.sqrt, k=5) 
## Warning in AIC.default(modelNULL, model1a, model1a.log, model1a.sqrt, k = 5):
## models are not all fitted to the same number of observations
BIC(modelNULL, model1a, model1a.log,model1a.sqrt)
## Warning in BIC.default(modelNULL, model1a, model1a.log, model1a.sqrt): models
## are not all fitted to the same number of observations

Model validation

DHARMa
model1a.log |> 
  simulateResiduals(plot=TRUE)  

## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help. 
##  
## Scaled residual values: 0.904 0.888 0.812 0.5 0.664 0.764 0.84 0.42 0.84 0.456 0.94 0.352 0.7 0.836 0.928 0.748 0.884 0.98 0.804 0.328 ...
model1a.log |> testResiduals(plot=T) 

## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.07065, p-value = 5.159e-06
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 0.98886, p-value = 0.864
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 25, observations = 1289, p-value =
## 9.858e-05
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.01258969 0.02849825
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                             0.01939488
## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.07065, p-value = 5.159e-06
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 0.98886, p-value = 0.864
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 25, observations = 1289, p-value =
## 9.858e-05
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.01258969 0.02849825
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                             0.01939488
performance
model1a.log |> 
  check_model(detrend=FALSE)
## `check_outliers()` does not yet support models of class `glmmTMB`.

Partial effect plots

model1a.log |> ggemmeans(~SL_FEMALE|TEMPERATURE) |> 
  plot(add.data =FALSE) 
## Model has log-transformed response. Back-transforming predictions to
##   original response scale. Standard errors are still on the log-scale.

model1a.log |> ggemmeans(~TEMPERATURE) |> 
  plot(add.data =FALSE) 
## NOTE: Results may be misleading due to involvement in interactions
## Model has log-transformed response. Back-transforming predictions to
##   original response scale. Standard errors are still on the log-scale.

Model investigation

Summary
model1a.log |> summary()
##  Family: gaussian  ( identity )
## Formula:          
## log(STANDARD_LENGTH) ~ SL_FEMALE * TEMPERATURE + (1 | CLUTCH_NUMBER)
## Data: m2.5_df
## 
##      AIC      BIC   logLik deviance df.resid 
##  -1722.1  -1680.8    869.0  -1738.1     1281 
## 
## Random effects:
## 
## Conditional model:
##  Groups        Name        Variance Std.Dev.
##  CLUTCH_NUMBER (Intercept) 0.002247 0.04741 
##  Residual                  0.014285 0.11952 
## Number of obs: 1289, groups:  CLUTCH_NUMBER, 50
## 
## Dispersion estimate for gaussian family (sigma^2): 0.0143 
## 
## Conditional model:
##                             Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                3.0169019  0.1436697  20.999   <2e-16 ***
## SL_FEMALE                  0.0017149  0.0014920   1.149    0.250    
## TEMPERATURE28.5            0.0437181  0.2237656   0.195    0.845    
## TEMPERATURE30              0.2064255  0.2355460   0.876    0.381    
## SL_FEMALE:TEMPERATURE28.5 -0.0005848  0.0023902  -0.245    0.807    
## SL_FEMALE:TEMPERATURE30   -0.0024581  0.0024848  -0.989    0.323    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Anova
model1a.log |> Anova()
Confint
model1a.log |> confint()
##                                          2.5 %      97.5 %     Estimate
## (Intercept)                        2.735314415 3.298489462  3.016901939
## SL_FEMALE                         -0.001209370 0.004639164  0.001714897
## TEMPERATURE28.5                   -0.394854344 0.482290509  0.043718083
## TEMPERATURE30                     -0.255236128 0.668087148  0.206425510
## SL_FEMALE:TEMPERATURE28.5         -0.005269533 0.004099951 -0.000584791
## SL_FEMALE:TEMPERATURE30           -0.007328298 0.002411998 -0.002458150
## Std.Dev.(Intercept)|CLUTCH_NUMBER  0.036649207 0.061320595  0.047406236
r-squared
model1a.log |> r2_nakagawa()
## # R2 for Mixed Models
## 
##   Conditional R2: 0.149
##      Marginal R2: 0.015

Summary figure

# Restrict to rows with a female-SL measurement so the emmeans grid
# bounds below (min/max of SL_FEMALE) are well-defined.
m2.5_df <- m2.5_df |> 
  drop_na(SL_FEMALE)
# Estimated marginal means over a 0.25 mm grid of female SL within each
# temperature, from the log-response model.
m2.5.sl <- emmeans(model1a.log, ~ SL_FEMALE|TEMPERATURE, 
                 at =list(SL_FEMALE=seq(from =min(m2.5_df$SL_FEMALE), to =max(m2.5_df$SL_FEMALE), by=.25)))

m2.5.sl.df <- as.data.frame(m2.5.sl)

# Partial residuals: fixed-effect predictions (re.form = NA drops the
# clutch random effect) plus response-scale residuals.
# NOTE(review): model1a.log has a log-transformed RESPONSE (transformation
# in the formula, not the link), so Pred/Resid here are on the log scale —
# confirm this is intended before plotting against raw lengths.
m2.5.sl.obs <- drop_na(m2.5_df, SL_FEMALE, STANDARD_LENGTH) |> 
  mutate(Pred =predict(model1a.log, re.form=NA, type ='response'), 
         Resid =residuals(model1a.log, type ='response'), 
         Fit =Pred+Resid) 

# Clutch-level means and 95% t-based CIs of the partial-residual fits.
# Fixes: the predictor in this section is SL_FEMALE (see model1a.log:
# log(STANDARD_LENGTH) ~ SL_FEMALE * TEMPERATURE), so the summary column
# is named mean.sl.female (it was mislabelled "male"), and the axis
# label / title below now say FEMALE. `.groups = "drop"` silences the
# summarise() grouping message and makes ungroup() unnecessary.
m2.5.sl.obs.summarize <-  m2.5.sl.obs |> 
  group_by(CLUTCH_NUMBER, TEMPERATURE) |> 
  summarise(mean.sl =mean(Fit, na.rm=TRUE), 
            mean.sl.female =mean(SL_FEMALE, na.rm = TRUE), 
            sd.sl =sd(Fit, na.rm =TRUE), 
            n.sl = n(), 
            .groups = "drop") |> 
  mutate(se.sl = sd.sl / sqrt(n.sl), 
         lower.ci.sl =mean.sl - qt(1-(0.05/2), n.sl -1) * se.sl, 
         upper.ci.sl =mean.sl + qt(1-(0.05/2), n.sl -1) * se.sl)
# Model-predicted trend (lines) with clutch means +/- 95% CI (points),
# faceted by temperature.
ggplot(data = m2.5.sl.df, aes(x=SL_FEMALE, y=emmean)) + 
  stat_smooth(aes(color=TEMPERATURE), 
              method = "lm", 
              formula = y ~ x) + 
  geom_pointrange(data = m2.5.sl.obs.summarize, aes(x =mean.sl.female, 
                                                  y =mean.sl, 
                                                  ymin =lower.ci.sl, 
                                                  ymax =upper.ci.sl, 
                                                  color = TEMPERATURE)) +  
  scale_color_manual(values = c("#69d7d8","#ff9c56", "#903146")) + 
  scale_fill_manual(values =c("#69d7d8","#ff9c56", "#903146")) +
  facet_wrap(~TEMPERATURE)+
  xlab("PARENTAL FEMALE STANDARD LENGTH (mm)") + 
  ylab("OFFSPRING STANDARD LENGTH (mm)") + 
  ggtitle("Offspring-female relationship") +
  theme_classic()

Offspring-Midpoint

1-month

Models

Main hypothesis
# 1-month main-hypothesis model: offspring SL as a function of parental
# midpoint SL crossed with rearing temperature, with a random intercept
# per clutch to account for shared parentage.
model1a <- glmmTMB(STANDARD_LENGTH ~ SL_MIDPOINT*TEMPERATURE + (1|CLUTCH_NUMBER), 
                    family=gaussian(), 
                    data=m1_df)
Alternative hypothesis
# Alternative hypothesis: allow the midpoint x temperature effect to vary
# with how long the parents were held in treatment.
# Fix: PARENTAL_DAYS_IN_TREATMENT was converted to a factor on import
# (mutate(across(1:15, factor))), so as.numeric() alone returns the factor
# LEVEL CODES (1, 2, ...), not the actual day counts. Converting through
# as.character() first recovers the original numeric values; it is a
# no-op if the column is ever left numeric.
model1b <- glmmTMB(STANDARD_LENGTH ~ SL_MIDPOINT*TEMPERATURE*as.numeric(as.character(PARENTAL_DAYS_IN_TREATMENT)) + (1|CLUTCH_NUMBER), 
                    family=gaussian(), 
                    data=m1_df)

Model selection

# Compare candidate models. Note k is the AIC penalty PER PARAMETER
# (default 2); k = 12 makes the selection much more conservative.
# NOTE(review): the warnings below mean the models were fitted to
# different numbers of rows (NA handling differs between formulas), so
# these AIC/BIC values are not strictly comparable — refit all models on
# a common complete-case subset to confirm the selection.
AIC(modelNULL, model1a, model1b, k=12) 
## Warning in AIC.default(modelNULL, model1a, model1b, k = 12): models are not all
## fitted to the same number of observations
BIC(modelNULL, model1a, model1b)
## Warning in BIC.default(modelNULL, model1a, model1b): models are not all fitted
## to the same number of observations

Model1a was selected as the best model and will be used going forward.

Model validation

DHARMa
# Simulate DHARMa scaled residuals (default 250 simulations) and draw the
# QQ / residual-vs-predicted diagnostic panel for the 1-month model.
model1a |> 
  simulateResiduals(plot=TRUE)  

## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help. 
##  
## Scaled residual values: 0.884 0.94 0.996 0.684 0.8 0.724 0.964 0.732 0.344 0.952 0.28 0.724 0.768 0.856 0.588 0.34 0.912 0.768 0.532 0.936 ...
# Formal tests on the simulated residuals: uniformity (KS test),
# dispersion, and outlier frequency.
# NOTE(review): prefer TRUE over the reassignable shorthand T.
model1a |> testResiduals(plot=T) 

## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.022999, p-value = 0.4821
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 1.0184, p-value = 0.808
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 10, observations = 1331, p-value = 1
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.003608554 0.013773413
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                            0.007513148
## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.022999, p-value = 0.4821
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 1.0184, p-value = 0.808
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 10, observations = 1331, p-value = 1
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.003608554 0.013773413
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                            0.007513148
performance
model1a |> 
  check_model(detrend=FALSE)
## `check_outliers()` does not yet support models of class `glmmTMB`.

Partial effect plots

model1a |> ggemmeans(~SL_MIDPOINT|TEMPERATURE) |> 
  plot(add.data =FALSE) 

model1a |> ggemmeans(~TEMPERATURE) |> 
  plot(add.data =FALSE) 
## NOTE: Results may be misleading due to involvement in interactions

Model investigation

Summary
model1a |> summary()
##  Family: gaussian  ( identity )
## Formula:          
## STANDARD_LENGTH ~ SL_MIDPOINT * TEMPERATURE + (1 | CLUTCH_NUMBER)
## Data: m1_df
## 
##      AIC      BIC   logLik deviance df.resid 
##   4338.3   4379.8  -2161.1   4322.3     1323 
## 
## Random effects:
## 
## Conditional model:
##  Groups        Name        Variance Std.Dev.
##  CLUTCH_NUMBER (Intercept) 1.044    1.022   
##  Residual                  1.346    1.160   
## Number of obs: 1331, groups:  CLUTCH_NUMBER, 48
## 
## Dispersion estimate for gaussian family (sigma^2): 1.35 
## 
## Conditional model:
##                              Estimate Std. Error z value Pr(>|z|)   
## (Intercept)                  3.710960   3.212695   1.155  0.24805   
## SL_MIDPOINT                  0.094769   0.033312   2.845  0.00444 **
## TEMPERATURE28.5              1.737605   5.493089   0.316  0.75176   
## TEMPERATURE30                7.100067   4.815692   1.474  0.14038   
## SL_MIDPOINT:TEMPERATURE28.5 -0.009484   0.058743  -0.162  0.87174   
## SL_MIDPOINT:TEMPERATURE30   -0.066058   0.051056  -1.294  0.19572   
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Anova
model1a |> Anova()
Confint
model1a |> confint()
##                                         2.5 %      97.5 %     Estimate
## (Intercept)                       -2.58580723 10.00772625  3.710959510
## SL_MIDPOINT                        0.02947975  0.16005857  0.094769159
## TEMPERATURE28.5                   -9.02865192 12.50386250  1.737605288
## TEMPERATURE30                     -2.33851551 16.53864908  7.100066787
## SL_MIDPOINT:TEMPERATURE28.5       -0.12461817  0.10564964 -0.009484266
## SL_MIDPOINT:TEMPERATURE30         -0.16612479  0.03400911 -0.066057843
## Std.Dev.(Intercept)|CLUTCH_NUMBER  0.82884321  1.25961036  1.021772722
r-squared
model1a |> r2_nakagawa()
## # R2 for Mixed Models
## 
##   Conditional R2: 0.504
##      Marginal R2: 0.120

Summary figure

# Drop rows lacking the midpoint predictor so the emmeans grid bounds
# below (min/max of SL_MIDPOINT) are well-defined.
m1_df <-  
  m1_df |> 
  drop_na(SL_MIDPOINT)

# Estimated marginal means over a 0.25 mm grid of parental midpoint SL
# within each temperature.
m1.sl <- emmeans(model1a, ~ SL_MIDPOINT*TEMPERATURE, 
                 at =list(SL_MIDPOINT=seq(from =min(m1_df$SL_MIDPOINT), to =max(m1_df$SL_MIDPOINT), by=.25)))

m1.sl.df <- as.data.frame(m1.sl)

# Partial residuals: fixed-effect predictions (re.form = NA excludes the
# clutch random effect) plus response-scale residuals.
m1.sl.obs <- drop_na(m1_df, SL_MIDPOINT, STANDARD_LENGTH) |> 
  mutate(Pred =predict(model1a, re.form=NA, type ='response'), 
         Resid =residuals(model1a, type ='response'), 
         Fit =Pred+Resid) 

# Clutch-level means and 95% t-based CIs of the partial-residual fits.
# Fixes: the predictor in this section is SL_MIDPOINT, so the summary
# column is named mean.sl.midpoint (it was mislabelled "female"), and the
# title below says "midpoint" rather than "male". `.groups = "drop"`
# silences the summarise() grouping message and replaces ungroup().
m1.sl.obs.summarize <-  m1.sl.obs |> 
  group_by(CLUTCH_NUMBER, TEMPERATURE) |> 
  summarise(mean.sl =mean(Fit, na.rm=TRUE), 
            mean.sl.midpoint =mean(SL_MIDPOINT, na.rm = TRUE), 
            sd.sl =sd(Fit, na.rm =TRUE), 
            n.sl = n(), 
            .groups = "drop") |> 
  mutate(se.sl = sd.sl / sqrt(n.sl), 
         lower.ci.sl =mean.sl - qt(1-(0.05/2), n.sl -1) * se.sl, 
         upper.ci.sl =mean.sl + qt(1-(0.05/2), n.sl -1) * se.sl)
# Model-predicted trend (lines) with clutch means +/- 95% CI (points).
# The explicit formula matches the other summary figures and silences
# the "`geom_smooth()` using formula" message.
ggplot(data = m1.sl.df, aes(x=SL_MIDPOINT, y=emmean)) + 
  stat_smooth(aes(color=TEMPERATURE), 
              method = "lm", 
              formula = y ~ x) + 
  geom_pointrange(data = m1.sl.obs.summarize, aes(x =mean.sl.midpoint, 
                                                  y =mean.sl, 
                                                  ymin =lower.ci.sl, 
                                                  ymax =upper.ci.sl, 
                                                  color = TEMPERATURE)) +  
  scale_color_manual(values = c("#69d7d8","#ff9c56", "#903146")) + 
  scale_fill_manual(values =c("#69d7d8","#ff9c56", "#903146")) +
  facet_wrap(~TEMPERATURE)+
  xlab("PARENTAL MIDPOINT STANDARD LENGTH (mm)") + 
  ylab("OFFSPRING STANDARD LENGTH (mm)") + 
  ggtitle("Offspring-midpoint relationship") +
  theme_classic()

2-month

Models

Main hypothesis
# 2-month main-hypothesis model: offspring SL ~ parental midpoint SL
# crossed with temperature, with a random intercept per clutch.
model1a <- glmmTMB(STANDARD_LENGTH ~ SL_MIDPOINT*TEMPERATURE + (1|CLUTCH_NUMBER), 
                    family=gaussian(), 
                    data=m2_df)
Alternative hypothesis
# Alternative hypothesis: midpoint x temperature effect varying with
# parental time in treatment.
# Fixes: (1) PARENTAL_DAYS_IN_TREATMENT is a factor after import, so
# as.numeric() alone returns level codes rather than day counts —
# convert through as.character() first. (2) family=gaussian() for
# consistency with every other model in this document (was the string
# 'gaussian').
model1b <- glmmTMB(STANDARD_LENGTH ~ SL_MIDPOINT*TEMPERATURE*as.numeric(as.character(PARENTAL_DAYS_IN_TREATMENT)) + (1|CLUTCH_NUMBER), 
                    family=gaussian(), 
                    data=m2_df)

Model selection

AIC(modelNULL, model1a, model1b, k=12) 
## Warning in AIC.default(modelNULL, model1a, model1b, k = 12): models are not all
## fitted to the same number of observations
BIC(modelNULL, model1a, model1b)
## Warning in BIC.default(modelNULL, model1a, model1b): models are not all fitted
## to the same number of observations

Model validation

DHARMa
model1a |> 
  simulateResiduals(plot=TRUE)  

## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help. 
##  
## Scaled residual values: 0.004 0.112 0.024 0.104 0.424 0.348 0.4 0.524 0.436 0.328 0.528 0.792 0.62 0.612 0.184 0.476 0.612 0.368 0.336 0.54 ...
model1a |> testResiduals(plot=T) 

## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.075278, p-value = 2.141e-06
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 1.0001, p-value = 0.992
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 17, observations = 1213, p-value = 0.03297
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.008184788 0.022344553
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                             0.01401484
## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.075278, p-value = 2.141e-06
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 1.0001, p-value = 0.992
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 17, observations = 1213, p-value = 0.03297
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.008184788 0.022344553
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                             0.01401484
performance
model1a |> 
  check_model(detrend=FALSE)
## `check_outliers()` does not yet support models of class `glmmTMB`.

# Quick look at the raw response distribution to judge whether a
# transformation is needed (motivates the sqrt/log models below).
hist(m2_df$STANDARD_LENGTH) 

# Alternative per-temperature histogram, kept for reference:
#ggplot(m2_df, aes(x=STANDARD_LENGTH, color=TEMPERATURE, fill=TEMPERATURE)) + 
  #geom_histogram(alpha=0.5) + 
 # theme_classic()

Models - transformations

Main hypothesis - sqrt transformation
# Response-transformed variants of the 2-month main model. The
# transformation is applied in the formula (identity link retained), so
# predictions from these models are on the sqrt/log scale respectively.
model1a.sqrt <- glmmTMB(sqrt(STANDARD_LENGTH) ~ SL_MIDPOINT*TEMPERATURE + (1|CLUTCH_NUMBER), 
                    family=gaussian(link="identity"), 
                    data=m2_df)

model1a.log <- glmmTMB(log(STANDARD_LENGTH) ~ SL_MIDPOINT*TEMPERATURE + (1|CLUTCH_NUMBER), 
                    family=gaussian(link="identity"), 
                    data=m2_df)

Model selection

AIC(modelNULL, model1a, model1a.sqrt,model1a.log,   k=4) 
## Warning in AIC.default(modelNULL, model1a, model1a.sqrt, model1a.log, k = 4):
## models are not all fitted to the same number of observations
BIC(modelNULL, model1a, model1a.sqrt,model1a.log)
## Warning in BIC.default(modelNULL, model1a, model1a.sqrt, model1a.log): models
## are not all fitted to the same number of observations

Conclusion: The log transformation greatly improves model performance. The log-transformed model will be used moving forward. Let's move on to the model validations.

Model validation

DHARMa
model1a.log |> 
  simulateResiduals(plot=TRUE)  
## qu = 0.75, log(sigma) = -2.524445 : outer Newton did not converge fully.

## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help. 
##  
## Scaled residual values: 0 0.128 0.024 0.108 0.452 0.38 0.444 0.556 0.464 0.348 0.556 0.784 0.64 0.62 0.196 0.48 0.636 0.396 0.388 0.56 ...
model1a.log |> testResiduals(plot=T) 

## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.10294, p-value = 1.368e-11
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 1.0004, p-value = 0.984
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 26, observations = 1213, p-value =
## 8.987e-06
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.01404833 0.03124958
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                             0.02143446
## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.10294, p-value = 1.368e-11
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 1.0004, p-value = 0.984
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 26, observations = 1213, p-value =
## 8.987e-06
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.01404833 0.03124958
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                             0.02143446
performance
model1a.log |> 
  check_model(detrend=FALSE)
## `check_outliers()` does not yet support models of class `glmmTMB`.

Partial effect plots

model1a.sqrt |> ggemmeans(~SL_MIDPOINT|TEMPERATURE) |> 
  plot(add.data =FALSE) 
## Model has sqrt-transformed response. Back-transforming predictions to
##   original response scale. Standard errors are still on the sqrt-scale.

model1a.sqrt |> ggemmeans(~TEMPERATURE) |> 
  plot(add.data =FALSE) 
## NOTE: Results may be misleading due to involvement in interactions
## Model has sqrt-transformed response. Back-transforming predictions to
##   original response scale. Standard errors are still on the sqrt-scale.

Model investigation

Summary
model1a.sqrt |> summary()
##  Family: gaussian  ( identity )
## Formula:          
## sqrt(STANDARD_LENGTH) ~ SL_MIDPOINT * TEMPERATURE + (1 | CLUTCH_NUMBER)
## Data: m2_df
## 
##      AIC      BIC   logLik deviance df.resid 
##    306.7    347.5   -145.3    290.7     1205 
## 
## Random effects:
## 
## Conditional model:
##  Groups        Name        Variance Std.Dev.
##  CLUTCH_NUMBER (Intercept) 0.008334 0.09129 
##  Residual                  0.070568 0.26565 
## Number of obs: 1213, groups:  CLUTCH_NUMBER, 45
## 
## Dispersion estimate for gaussian family (sigma^2): 0.0706 
## 
## Conditional model:
##                               Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                  4.5680403  0.3463791  13.188   <2e-16 ***
## SL_MIDPOINT                 -0.0007419  0.0035725  -0.208    0.835    
## TEMPERATURE28.5             -0.3097405  0.5622775  -0.551    0.582    
## TEMPERATURE30                0.2568173  0.5282549   0.486    0.627    
## SL_MIDPOINT:TEMPERATURE28.5  0.0033209  0.0059915   0.554    0.579    
## SL_MIDPOINT:TEMPERATURE30   -0.0030421  0.0055681  -0.546    0.585    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Anova
model1a.sqrt |> Anova()
Confint
model1a.sqrt |> confint()
##                                          2.5 %      97.5 %      Estimate
## (Intercept)                        3.889149826 5.246930855  4.5680403408
## SL_MIDPOINT                       -0.007743953 0.006260161 -0.0007418964
## TEMPERATURE28.5                   -1.411784137 0.792303094 -0.3097405213
## TEMPERATURE30                     -0.778543243 1.292177900  0.2568173288
## SL_MIDPOINT:TEMPERATURE28.5       -0.008422291 0.015064054  0.0033208814
## SL_MIDPOINT:TEMPERATURE30         -0.013955311 0.007871187 -0.0030420618
## Std.Dev.(Intercept)|CLUTCH_NUMBER  0.069457445 0.119981456  0.0912885828
r-squared
model1a.sqrt |> r2_nakagawa()
## # R2 for Mixed Models
## 
##   Conditional R2: 0.110
##      Marginal R2: 0.005

Summary figure

# Drop rows lacking the midpoint predictor so the emmeans grid bounds
# are well-defined.
m2_df <-  
  m2_df |> 
  drop_na(SL_MIDPOINT)
# Estimated marginal means over a 0.25 mm grid of parental midpoint SL
# within each temperature, from the sqrt-response model.
m2.sl <- emmeans(model1a.sqrt, ~ SL_MIDPOINT*TEMPERATURE, 
                 at =list(SL_MIDPOINT=seq(from =min(m2_df$SL_MIDPOINT), to =max(m2_df$SL_MIDPOINT), by=.25)))

m2.sl.df <- as.data.frame(m2.sl)

# Partial residuals: fixed-effect predictions (re.form = NA excludes the
# clutch random effect) plus response-scale residuals.
# NOTE(review): model1a.sqrt has a sqrt-transformed response in the
# formula, so Pred/Resid are on the sqrt scale — confirm this is intended
# before plotting against raw lengths.
m2.sl.obs <- drop_na(m2_df, SL_MIDPOINT, STANDARD_LENGTH) |> 
  mutate(Pred =predict(model1a.sqrt, re.form=NA, type ='response'), 
         Resid =residuals(model1a.sqrt, type ='response'), 
         Fit =Pred+Resid) 

# Clutch-level means and 95% t-based CIs of the partial-residual fits.
# Fixes: the predictor here is SL_MIDPOINT, so the summary column is
# named mean.sl.midpoint (it was mislabelled "male"), and the title says
# "midpoint". `.groups = "drop"` silences the summarise() grouping
# message and replaces ungroup().
m2.sl.obs.summarize <-  m2.sl.obs |> 
  group_by(CLUTCH_NUMBER, TEMPERATURE) |> 
  summarise(mean.sl =mean(Fit, na.rm=TRUE), 
            mean.sl.midpoint =mean(SL_MIDPOINT, na.rm = TRUE), 
            sd.sl =sd(Fit, na.rm =TRUE), 
            n.sl = n(), 
            .groups = "drop") |> 
  mutate(se.sl = sd.sl / sqrt(n.sl), 
         lower.ci.sl =mean.sl - qt(1-(0.05/2), n.sl -1) * se.sl, 
         upper.ci.sl =mean.sl + qt(1-(0.05/2), n.sl -1) * se.sl)
# Model-predicted trend (lines) with clutch means +/- 95% CI (points).
ggplot(data = m2.sl.df, aes(x=SL_MIDPOINT, y=emmean)) + 
  stat_smooth(aes(color=TEMPERATURE), 
              method = "lm", 
              formula = y ~ x) + 
  geom_pointrange(data = m2.sl.obs.summarize, aes(x =mean.sl.midpoint, 
                                                  y =mean.sl, 
                                                  ymin =lower.ci.sl, 
                                                  ymax =upper.ci.sl, 
                                                  color = TEMPERATURE)) +  
  scale_color_manual(values = c("#69d7d8","#ff9c56", "#903146")) + 
  scale_fill_manual(values =c("#69d7d8","#ff9c56", "#903146")) +
  facet_wrap(~TEMPERATURE)+
  xlab("PARENTAL MIDPOINT STANDARD LENGTH (mm)") + 
  ylab("OFFSPRING STANDARD LENGTH (mm)") + 
  ggtitle("Offspring-midpoint relationship") +
  theme_classic()

2.5-month

Models

Main hypothesis
# 2.5-month main-hypothesis model: offspring SL ~ parental midpoint SL
# crossed with temperature, with a random intercept per clutch.
model1a <- glmmTMB(STANDARD_LENGTH ~ SL_MIDPOINT*TEMPERATURE + (1|CLUTCH_NUMBER), 
                    family=gaussian(link ="identity"), 
                    data=m2.5_df)
Alternative hypothesis
# Alternative hypothesis: midpoint x temperature effect varying with
# parental time in treatment.
# Fix: PARENTAL_DAYS_IN_TREATMENT is a factor after import, so
# as.numeric() alone returns level codes rather than day counts —
# convert through as.character() first to recover the numeric values.
model1b <- glmmTMB(STANDARD_LENGTH ~ SL_MIDPOINT*TEMPERATURE*as.numeric(as.character(PARENTAL_DAYS_IN_TREATMENT)) + (1|CLUTCH_NUMBER), 
                    family=gaussian(link ="identity"), 
                    data=m2.5_df)

Model selection

AIC(modelNULL, model1a, model1b, k=12) 
## Warning in AIC.default(modelNULL, model1a, model1b, k = 12): models are not all
## fitted to the same number of observations
BIC(modelNULL, model1a, model1b)
## Warning in BIC.default(modelNULL, model1a, model1b): models are not all fitted
## to the same number of observations

Once again the NULL model seems to outperform our hypothesis-testing models. Let's follow the steps that we conducted for the 2-month data and apply a log transformation to our dataset to see if it improves the model.

Models

Main hypothesis
# Response-transformed variants of the 2.5-month main model, mirroring
# the 2-month workflow. The transformation sits in the formula (identity
# link retained), so predictions are on the log/sqrt scale respectively.
model1a.log <- glmmTMB(log(STANDARD_LENGTH) ~ SL_MIDPOINT*TEMPERATURE + (1|CLUTCH_NUMBER), 
                    family=gaussian(link="identity"), 
                    data=m2.5_df)

model1a.sqrt <- glmmTMB(sqrt(STANDARD_LENGTH) ~ SL_MIDPOINT*TEMPERATURE + (1|CLUTCH_NUMBER), 
                    family=gaussian(link ="identity"), 
                    data=m2.5_df)

Model selection

AIC(modelNULL, model1a, model1a.log,model1a.sqrt, k=5) 
## Warning in AIC.default(modelNULL, model1a, model1a.log, model1a.sqrt, k = 5):
## models are not all fitted to the same number of observations
BIC(modelNULL, model1a, model1a.log,model1a.sqrt)
## Warning in BIC.default(modelNULL, model1a, model1a.log, model1a.sqrt): models
## are not all fitted to the same number of observations

Model validation

DHARMa
model1a.log |> 
  simulateResiduals(plot=TRUE)  

## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help. 
##  
## Scaled residual values: 0.904 0.888 0.812 0.5 0.664 0.764 0.84 0.42 0.84 0.456 0.94 0.348 0.692 0.832 0.928 0.744 0.88 0.98 0.8 0.328 ...
model1a.log |> testResiduals(plot=T) 

## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.070529, p-value = 5.391e-06
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 0.98904, p-value = 0.864
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 24, observations = 1289, p-value =
## 0.000198
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.01196512 0.02757771
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                             0.01861908
## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.070529, p-value = 5.391e-06
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 0.98904, p-value = 0.864
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 24, observations = 1289, p-value =
## 0.000198
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.01196512 0.02757771
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                             0.01861908
performance
model1a.log |> 
  check_model(detrend=FALSE)
## `check_outliers()` does not yet support models of class `glmmTMB`.

Partial effect plots

model1a.log |> ggemmeans(~SL_MIDPOINT|TEMPERATURE) |> 
  plot(add.data =FALSE) 
## Model has log-transformed response. Back-transforming predictions to
##   original response scale. Standard errors are still on the log-scale.

model1a.log |> ggemmeans(~TEMPERATURE) |> 
  plot(add.data =FALSE) 
## NOTE: Results may be misleading due to involvement in interactions
## Model has log-transformed response. Back-transforming predictions to
##   original response scale. Standard errors are still on the log-scale.

Model investigation

Summary
model1a.log |> summary()
##  Family: gaussian  ( identity )
## Formula:          
## log(STANDARD_LENGTH) ~ SL_MIDPOINT * TEMPERATURE + (1 | CLUTCH_NUMBER)
## Data: m2.5_df
## 
##      AIC      BIC   logLik deviance df.resid 
##  -1721.8  -1680.5    868.9  -1737.8     1281 
## 
## Random effects:
## 
## Conditional model:
##  Groups        Name        Variance Std.Dev.
##  CLUTCH_NUMBER (Intercept) 0.002266 0.0476  
##  Residual                  0.014284 0.1195  
## Number of obs: 1289, groups:  CLUTCH_NUMBER, 50
## 
## Dispersion estimate for gaussian family (sigma^2): 0.0143 
## 
## Conditional model:
##                               Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                  3.0021622  0.1632880  18.386   <2e-16 ***
## SL_MIDPOINT                  0.0018651  0.0016939   1.101    0.271    
## TEMPERATURE28.5              0.0500368  0.2772671   0.180    0.857    
## TEMPERATURE30                0.2036245  0.2394468   0.850    0.395    
## SL_MIDPOINT:TEMPERATURE28.5 -0.0006479  0.0029646  -0.219    0.827    
## SL_MIDPOINT:TEMPERATURE30   -0.0024310  0.0025427  -0.956    0.339    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Anova
model1a.log |> Anova()
Confint
model1a.log |> confint()
##                                          2.5 %      97.5 %      Estimate
## (Intercept)                        2.682123614 3.322200775  3.0021621947
## SL_MIDPOINT                       -0.001454866 0.005185117  0.0018651255
## TEMPERATURE28.5                   -0.493396679 0.593470289  0.0500368050
## TEMPERATURE30                     -0.265682655 0.672931581  0.2036244632
## SL_MIDPOINT:TEMPERATURE28.5       -0.006458364 0.005162652 -0.0006478557
## SL_MIDPOINT:TEMPERATURE30         -0.007414637 0.002552685 -0.0024309759
## Std.Dev.(Intercept)|CLUTCH_NUMBER  0.036826284 0.061530783  0.0476019964
r-squared
model1a.log |> r2_nakagawa()
## # R2 for Mixed Models
## 
##   Conditional R2: 0.149
##      Marginal R2: 0.014

Summary figure

# Drop rows lacking the midpoint predictor so the emmeans grid bounds
# are well-defined.
m2.5_df <- m2.5_df |> 
  drop_na(SL_MIDPOINT)
# Estimated marginal means over a 0.25 mm grid of parental midpoint SL
# within each temperature, from the log-response model.
m2.5.sl <- emmeans(model1a.log, ~ SL_MIDPOINT*TEMPERATURE, 
                 at =list(SL_MIDPOINT=seq(from =min(m2.5_df$SL_MIDPOINT), to =max(m2.5_df$SL_MIDPOINT), by=.25)))

m2.5.sl.df <- as.data.frame(m2.5.sl)

# Partial residuals: fixed-effect predictions (re.form = NA excludes the
# clutch random effect) plus response-scale residuals.
# NOTE(review): model1a.log has a log-transformed response in the
# formula, so Pred/Resid are on the log scale — confirm before plotting
# against raw lengths.
m2.5.sl.obs <- drop_na(m2.5_df, SL_MIDPOINT, STANDARD_LENGTH) |> 
  mutate(Pred =predict(model1a.log, re.form=NA, type ='response'), 
         Resid =residuals(model1a.log, type ='response'), 
         Fit =Pred+Resid) 

# Clutch-level means and 95% t-based CIs of the partial-residual fits.
# Fixes: the predictor in this section is SL_MIDPOINT, so the summary
# column is named mean.sl.midpoint (was mislabelled "male"), and the
# x-axis label / title below say MIDPOINT rather than MALE.
# `.groups = "drop"` silences the summarise() grouping message and
# replaces ungroup().
m2.5.sl.obs.summarize <-  m2.5.sl.obs |> 
  group_by(CLUTCH_NUMBER, TEMPERATURE) |> 
  summarise(mean.sl =mean(Fit, na.rm=TRUE), 
            mean.sl.midpoint =mean(SL_MIDPOINT, na.rm = TRUE), 
            sd.sl =sd(Fit, na.rm =TRUE), 
            n.sl = n(), 
            .groups = "drop") |> 
  mutate(se.sl = sd.sl / sqrt(n.sl), 
         lower.ci.sl =mean.sl - qt(1-(0.05/2), n.sl -1) * se.sl, 
         upper.ci.sl =mean.sl + qt(1-(0.05/2), n.sl -1) * se.sl)
# Model-predicted trend (lines) with clutch means +/- 95% CI (points).
ggplot(data = m2.5.sl.df, aes(x=SL_MIDPOINT, y=emmean)) + 
  stat_smooth(aes(color=TEMPERATURE), 
              method = "lm", 
              formula = y ~ x) + 
  geom_pointrange(data = m2.5.sl.obs.summarize, aes(x =mean.sl.midpoint, 
                                                  y =mean.sl, 
                                                  ymin =lower.ci.sl, 
                                                  ymax =upper.ci.sl, 
                                                  color = TEMPERATURE)) +  
  scale_color_manual(values = c("#69d7d8","#ff9c56", "#903146")) + 
  scale_fill_manual(values =c("#69d7d8","#ff9c56", "#903146")) +
  facet_wrap(~TEMPERATURE)+
  xlab("PARENTAL MIDPOINT STANDARD LENGTH (mm)") + 
  ylab("OFFSPRING STANDARD LENGTH (mm)") + 
  ggtitle("Offspring-midpoint relationship") +
  theme_classic()